execve.cpp 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031
  1. /*
  2. * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
  3. * Copyright (c) 2022, the SerenityOS developers.
  4. *
  5. * SPDX-License-Identifier: BSD-2-Clause
  6. */
  7. #include <AK/ScopeGuard.h>
  8. #include <AK/TemporaryChange.h>
  9. #include <Kernel/Arch/CPU.h>
  10. #include <Kernel/Debug.h>
  11. #include <Kernel/FileSystem/Custody.h>
  12. #include <Kernel/FileSystem/OpenFileDescription.h>
  13. #include <Kernel/FileSystem/VirtualFileSystem.h>
  14. #include <Kernel/Memory/MemoryManager.h>
  15. #include <Kernel/Memory/Region.h>
  16. #include <Kernel/Memory/SharedInodeVMObject.h>
  17. #include <Kernel/Panic.h>
  18. #include <Kernel/PerformanceManager.h>
  19. #include <Kernel/Random.h>
  20. #include <Kernel/Tasks/Process.h>
  21. #include <Kernel/Tasks/Scheduler.h>
  22. #include <Kernel/Time/TimeManagement.h>
  23. #include <LibELF/AuxiliaryVector.h>
  24. #include <LibELF/Image.h>
  25. #include <LibELF/Validation.h>
  26. namespace Kernel {
  27. extern Memory::Region* g_signal_trampoline_region;
// Result of mapping one ELF object into a new address space.
struct LoadResult {
    FlatPtr load_base { 0 };                // Virtual address the object was mapped at (PT_LOAD with file offset 0).
    FlatPtr entry_eip { 0 };                // Entry point, already adjusted by the load offset.
    size_t size { 0 };                      // Size of the executable file, in bytes.
    LockWeakPtr<Memory::Region> tls_region; // Master TLS image region, if a PT_TLS header was loaded.
    size_t tls_size { 0 };                  // Size in memory of the PT_TLS segment (0 if none).
    size_t tls_alignment { 0 };             // Alignment requirement of the PT_TLS segment (0 if none).
    LockWeakPtr<Memory::Region> stack_region; // The main thread's userspace stack.
};
  37. static constexpr size_t auxiliary_vector_size = 15;
  38. static Array<ELF::AuxiliaryValue, auxiliary_vector_size> generate_auxiliary_vector(FlatPtr load_base, FlatPtr entry_eip, UserID uid, UserID euid, GroupID gid, GroupID egid, StringView executable_path, Optional<Process::ScopedDescriptionAllocation> const& main_program_fd_allocation);
  39. static bool validate_stack_size(Vector<NonnullOwnPtr<KString>> const& arguments, Vector<NonnullOwnPtr<KString>>& environment, Array<ELF::AuxiliaryValue, auxiliary_vector_size> const& auxiliary)
  40. {
  41. size_t total_arguments_size = 0;
  42. size_t total_environment_size = 0;
  43. size_t total_auxiliary_size = 0;
  44. for (auto const& a : arguments)
  45. total_arguments_size += a->length() + 1;
  46. for (auto const& e : environment)
  47. total_environment_size += e->length() + 1;
  48. for (auto const& v : auxiliary) {
  49. if (!v.optional_string.is_empty())
  50. total_auxiliary_size += round_up_to_power_of_two(v.optional_string.length() + 1, sizeof(FlatPtr));
  51. if (v.auxv.a_type == ELF::AuxiliaryValue::Random)
  52. total_auxiliary_size += round_up_to_power_of_two(16, sizeof(FlatPtr));
  53. }
  54. total_arguments_size += sizeof(char*) * (arguments.size() + 1);
  55. total_environment_size += sizeof(char*) * (environment.size() + 1);
  56. total_auxiliary_size += sizeof(auxv_t) * auxiliary.size();
  57. if (total_arguments_size > Process::max_arguments_size)
  58. return false;
  59. if (total_environment_size > Process::max_environment_size)
  60. return false;
  61. if (total_auxiliary_size > Process::max_auxiliary_size)
  62. return false;
  63. return true;
  64. }
// Builds the initial userspace stack for the new process' main thread.
// Layout (pushed top-down): argument strings, environment strings, auxiliary
// strings (incl. 16 random bytes for the AT_RANDOM entry), the auxv array,
// null-terminated envp[], then null-terminated argv[].
// Returns the final, 16-byte-aligned stack pointer, and passes
// argc/argv/envp to the entry point via architecture-specific registers.
// NOTE: copy_to_user() here targets the NEW address space - the caller has
// already switched to it.
static ErrorOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]] ThreadRegisters& regs, Memory::Region& region, Vector<NonnullOwnPtr<KString>> const& arguments,
    Vector<NonnullOwnPtr<KString>> const& environment, Array<ELF::AuxiliaryValue, auxiliary_vector_size> auxiliary_values)
{
    FlatPtr new_sp = region.range().end().get();

    // Add some bits of randomness to the user stack pointer.
    new_sp -= round_up_to_power_of_two(get_fast_random<u32>() % 4096, 16);

    // Helpers that copy a machine word / auxv record / NUL-terminated string
    // to the (userspace) stack, moving new_sp down as they go.
    auto push_on_new_stack = [&new_sp](FlatPtr value) {
        new_sp -= sizeof(FlatPtr);
        Userspace<FlatPtr*> stack_ptr = new_sp;
        auto result = copy_to_user(stack_ptr, &value);
        VERIFY(!result.is_error());
    };
    auto push_aux_value_on_new_stack = [&new_sp](auxv_t value) {
        new_sp -= sizeof(auxv_t);
        Userspace<auxv_t*> stack_ptr = new_sp;
        auto result = copy_to_user(stack_ptr, &value);
        VERIFY(!result.is_error());
    };
    auto push_string_on_new_stack = [&new_sp](StringView string) {
        // Strings are padded to FlatPtr alignment (validate_stack_size() accounts for this).
        new_sp -= round_up_to_power_of_two(string.length() + 1, sizeof(FlatPtr));
        Userspace<FlatPtr*> stack_ptr = new_sp;
        auto result = copy_to_user(stack_ptr, string.characters_without_null_termination(), string.length() + 1);
        VERIFY(!result.is_error());
    };

    // Push the string data and remember each string's userspace address.
    Vector<FlatPtr> argv_entries;
    for (auto const& argument : arguments) {
        push_string_on_new_stack(argument->view());
        TRY(argv_entries.try_append(new_sp));
    }
    Vector<FlatPtr> env_entries;
    for (auto const& variable : environment) {
        push_string_on_new_stack(variable->view());
        TRY(env_entries.try_append(new_sp));
    }

    // Push auxiliary out-of-band data and patch the a_ptr of each affected
    // entry to point at its userspace copy.
    for (auto& value : auxiliary_values) {
        if (!value.optional_string.is_empty()) {
            push_string_on_new_stack(value.optional_string);
            value.auxv.a_un.a_ptr = (void*)new_sp;
        }
        if (value.auxv.a_type == ELF::AuxiliaryValue::Random) {
            u8 random_bytes[16] {};
            get_fast_random_bytes({ random_bytes, sizeof(random_bytes) });
            push_string_on_new_stack({ random_bytes, sizeof(random_bytes) });
            value.auxv.a_un.a_ptr = (void*)new_sp;
        }
    }

    // Push the auxv records in reverse so they end up in declared order.
    for (ssize_t i = auxiliary_values.size() - 1; i >= 0; --i) {
        auto& value = auxiliary_values[i];
        push_aux_value_on_new_stack(value.auxv);
    }

    // envp[]: null terminator first, then entries in reverse.
    push_on_new_stack(0);
    for (ssize_t i = env_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(env_entries[i]);
    FlatPtr envp = new_sp;

    // argv[]: same scheme.
    push_on_new_stack(0);
    for (ssize_t i = argv_entries.size() - 1; i >= 0; --i)
        push_on_new_stack(argv_entries[i]);
    FlatPtr argv = new_sp;

    // NOTE: The stack needs to be 16-byte aligned.
    new_sp -= new_sp % 16;

#if ARCH(X86_64)
    regs.rdi = argv_entries.size();
    regs.rsi = argv;
    regs.rdx = envp;
#elif ARCH(AARCH64)
    regs.x[0] = argv_entries.size();
    regs.x[1] = argv;
    regs.x[2] = envp;
#else
#    error Unknown architecture
#endif

    VERIFY(new_sp % 16 == 0);

    // FIXME: The way we're setting up the stack and passing arguments to the entry point isn't ABI-compliant
    return new_sp;
}
// Lowest and highest virtual addresses demanded by an ELF object's PT_LOAD headers.
struct RequiredLoadRange {
    FlatPtr start { 0 };
    FlatPtr end { 0 };
};
  144. static ErrorOr<RequiredLoadRange> get_required_load_range(OpenFileDescription& program_description)
  145. {
  146. auto& inode = *(program_description.inode());
  147. auto vmobject = TRY(Memory::SharedInodeVMObject::try_create_with_inode(inode));
  148. size_t executable_size = inode.size();
  149. size_t rounded_executable_size = TRY(Memory::page_round_up(executable_size));
  150. auto region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, rounded_executable_size, "ELF memory range calculation"sv, Memory::Region::Access::Read));
  151. auto elf_image = ELF::Image(region->vaddr().as_ptr(), executable_size);
  152. if (!elf_image.is_valid()) {
  153. return EINVAL;
  154. }
  155. RequiredLoadRange range {};
  156. elf_image.for_each_program_header([&range](auto const& pheader) {
  157. if (pheader.type() != PT_LOAD)
  158. return;
  159. auto region_start = (FlatPtr)pheader.vaddr().as_ptr();
  160. auto region_end = region_start + pheader.size_in_memory();
  161. if (range.start == 0 || region_start < range.start)
  162. range.start = region_start;
  163. if (range.end == 0 || region_end > range.end)
  164. range.end = region_end;
  165. });
  166. VERIFY(range.end > range.start);
  167. return range;
  168. };
// Picks the (randomized) offset at which to load the program or, for
// fixed-address ET_EXEC main programs, the offset for its interpreter.
// - ET_DYN (PIE): anywhere in the 256 MiB load range.
// - ET_EXEC with interpreter: pick the larger gap around the main program's
//   fixed range, so the interpreter cannot collide with it.
// Fails with E2BIG if the leftover space gives less than 10 MiB of entropy.
static ErrorOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header, OpenFileDescription& main_program_description, OpenFileDescription* interpreter_description)
{
    constexpr FlatPtr load_range_start = 0x08000000;
    constexpr FlatPtr load_range_size = 65536 * PAGE_SIZE; // 2**16 * PAGE_SIZE = 256MB
    constexpr FlatPtr minimum_load_offset_randomization_size = 10 * MiB;

    auto random_load_offset_in_range([](auto start, auto size) {
        return Memory::page_round_down(start + get_good_random<FlatPtr>() % size);
    });

    if (main_program_header.e_type == ET_DYN) {
        return random_load_offset_in_range(load_range_start, load_range_size);
    }

    if (main_program_header.e_type != ET_EXEC)
        return EINVAL;

    auto main_program_load_range = TRY(get_required_load_range(main_program_description));

    RequiredLoadRange selected_range {};
    if (interpreter_description) {
        auto interpreter_load_range = TRY(get_required_load_range(*interpreter_description));
        auto interpreter_size_in_memory = interpreter_load_range.end - interpreter_load_range.start;
        // Highest start address that still leaves room for the whole interpreter.
        auto interpreter_load_range_end = load_range_start + load_range_size - interpreter_size_in_memory;

        // No intersection
        if (main_program_load_range.end < load_range_start || main_program_load_range.start > interpreter_load_range_end)
            return random_load_offset_in_range(load_range_start, load_range_size);

        // The main program splits the load range in two; use the bigger piece.
        RequiredLoadRange first_available_part = { load_range_start, main_program_load_range.start };
        RequiredLoadRange second_available_part = { main_program_load_range.end, interpreter_load_range_end };

        // Select larger part
        if (first_available_part.end - first_available_part.start > second_available_part.end - second_available_part.start)
            selected_range = first_available_part;
        else
            selected_range = second_available_part;
    } else
        selected_range = main_program_load_range;

    // If main program is too big and leaves us without enough space for adequate loader randomization
    if (selected_range.end - selected_range.start < minimum_load_offset_randomization_size)
        return E2BIG;

    return random_load_offset_in_range(selected_range.start, selected_range.end - selected_range.start);
}
// Whether load_elf_object() should materialize a master TLS image from a
// PT_TLS header. Yes for the main program when statically linked; No when
// loading the dynamic loader (TLS is then set up in userspace - see
// Process::load()).
enum class ShouldAllocateTls {
    No,
    Yes,
};
// Whether the regions mapped by load_elf_object() get set_syscall_region(true).
// Passed as Yes when loading the dynamic loader (see Process::load());
// presumably this marks code permitted to make syscalls - confirm against
// Memory::Region.
enum class ShouldAllowSyscalls {
    No,
    Yes,
};
// Maps a single ELF object (main program or dynamic loader) into new_space
// at load_offset, and allocates the main thread's stack there.
// Program headers are handled as follows:
//   PT_TLS       -> master TLS image (only when should_allocate_tls == Yes)
//   PT_LOAD (rw) -> copied into a freshly allocated anonymous region
//   PT_LOAD (ro/rx) -> mapped directly from the inode's shared VM object
//   PT_GNU_STACK -> may raise the stack size above the default
// NOTE: This switches the CPU to new_space's page tables so the
// copy_to_user() calls below write into the NEW address space; the caller
// is responsible for switching back if this function fails.
static ErrorOr<LoadResult> load_elf_object(Memory::AddressSpace& new_space, OpenFileDescription& object_description,
    FlatPtr load_offset, ShouldAllocateTls should_allocate_tls, ShouldAllowSyscalls should_allow_syscalls, Optional<size_t> minimum_stack_size = {})
{
    auto& inode = *(object_description.inode());
    auto vmobject = TRY(Memory::SharedInodeVMObject::try_create_with_inode(inode));

    // Refuse to execute a binary that is currently mapped writable anywhere:
    // its text could be modified while we run it.
    if (vmobject->writable_mappings()) {
        dbgln("Refusing to execute a write-mapped program");
        return ETXTBSY;
    }

    size_t executable_size = inode.size();
    size_t rounded_executable_size = TRY(Memory::page_round_up(executable_size));

    // Map the whole file read-only in kernel space so we can parse and copy from it.
    auto executable_region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, rounded_executable_size, "ELF loading"sv, Memory::Region::Access::Read));
    auto elf_image = ELF::Image(executable_region->vaddr().as_ptr(), executable_size);

    if (!elf_image.is_valid())
        return ENOEXEC;

    Memory::Region* master_tls_region { nullptr };
    size_t master_tls_size = 0;
    size_t master_tls_alignment = 0;
    FlatPtr load_base_address = 0;
    size_t stack_size = Thread::default_userspace_stack_size;

    auto elf_name = TRY(object_description.pseudo_path());
    VERIFY(!Processor::in_critical());

    // From here on, copy_to_user() targets the new address space.
    Memory::MemoryManager::enter_address_space(new_space);

    // PT_TLS: allocate and fill the master TLS image.
    auto load_tls_section = [&](auto& program_header) -> ErrorOr<void> {
        VERIFY(should_allocate_tls == ShouldAllocateTls::Yes);
        VERIFY(program_header.size_in_memory());

        if (!elf_image.is_within_image(program_header.raw_data(), program_header.size_in_image())) {
            dbgln("Shenanigans! ELF PT_TLS header sneaks outside of executable.");
            return ENOEXEC;
        }

        auto region_name = TRY(KString::formatted("{} (master-tls)", elf_name));
        master_tls_region = TRY(new_space.allocate_region(Memory::RandomizeVirtualAddress::Yes, {}, program_header.size_in_memory(), PAGE_SIZE, region_name->view(), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
        master_tls_size = program_header.size_in_memory();
        master_tls_alignment = program_header.alignment();

        TRY(copy_to_user(master_tls_region->vaddr().as_ptr(), program_header.raw_data(), program_header.size_in_image()));
        return {};
    };

    auto load_writable_section = [&](auto& program_header) -> ErrorOr<void> {
        // Writable section: create a copy in memory.
        VERIFY(program_header.alignment() % PAGE_SIZE == 0);

        if (!elf_image.is_within_image(program_header.raw_data(), program_header.size_in_image())) {
            dbgln("Shenanigans! Writable ELF PT_LOAD header sneaks outside of executable.");
            return ENOEXEC;
        }

        int prot = 0;
        if (program_header.is_readable())
            prot |= PROT_READ;
        if (program_header.is_writable())
            prot |= PROT_WRITE;
        auto region_name = TRY(KString::formatted("{} (data-{}{})", elf_name, program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : ""));

        auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
        size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
        auto range_end = VirtualAddress { rounded_range_end };

        auto region = TRY(new_space.allocate_region(Memory::RandomizeVirtualAddress::Yes, range_base, range_end.get() - range_base.get(), PAGE_SIZE, region_name->view(), prot, AllocationStrategy::Reserve));

        // It's not always the case with PIE executables (and very well shouldn't be) that the
        // virtual address in the program header matches the one we end up giving the process.
        // In order to copy the data image correctly into memory, we need to copy the data starting at
        // the right initial page offset into the pages allocated for the elf_alloc-XX section.
        // FIXME: There's an opportunity to munmap, or at least mprotect, the padding space between
        // the .text and .data PT_LOAD sections of the executable.
        // Accessing it would definitely be a bug.
        auto page_offset = program_header.vaddr();
        page_offset.mask(~PAGE_MASK);
        TRY(copy_to_user((u8*)region->vaddr().as_ptr() + page_offset.get(), program_header.raw_data(), program_header.size_in_image()));
        return {};
    };

    auto load_section = [&](auto& program_header) -> ErrorOr<void> {
        if (program_header.size_in_memory() == 0)
            return {};

        if (program_header.is_writable())
            return load_writable_section(program_header);

        // Non-writable section: map the executable itself in memory.
        VERIFY(program_header.alignment() % PAGE_SIZE == 0);
        int prot = 0;
        if (program_header.is_readable())
            prot |= PROT_READ;
        if (program_header.is_writable())
            prot |= PROT_WRITE;
        if (program_header.is_executable())
            prot |= PROT_EXEC;

        auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
        size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
        auto range_end = VirtualAddress { rounded_range_end };
        auto region = TRY(new_space.allocate_region_with_vmobject(Memory::RandomizeVirtualAddress::Yes, range_base, range_end.get() - range_base.get(), program_header.alignment(), *vmobject, program_header.offset(), elf_name->view(), prot, true));
        if (should_allow_syscalls == ShouldAllowSyscalls::Yes)
            region->set_syscall_region(true);
        // The header with file offset 0 establishes the object's load base.
        if (program_header.offset() == 0)
            load_base_address = (FlatPtr)region->vaddr().as_ptr();
        return {};
    };

    auto load_elf_program_header = [&](auto& program_header) -> ErrorOr<void> {
        if (program_header.type() == PT_TLS)
            return load_tls_section(program_header);

        if (program_header.type() == PT_LOAD)
            return load_section(program_header);

        if (program_header.type() == PT_GNU_STACK) {
            auto new_stack_size = program_header.size_in_memory();
            if (new_stack_size > stack_size)
                stack_size = new_stack_size;
        }

        // NOTE: We ignore other program header types.
        return {};
    };

    // Iterate all program headers, stopping at the first error.
    TRY([&] {
        ErrorOr<void> result;
        elf_image.for_each_program_header([&](ELF::Image::ProgramHeader const& program_header) {
            result = load_elf_program_header(program_header);
            return result.is_error() ? IterationDecision::Break : IterationDecision::Continue;
        });
        return result;
    }());

    if (!elf_image.entry().offset(load_offset).get()) {
        dbgln("do_exec: Failure loading program, entry pointer is invalid! {})", elf_image.entry().offset(load_offset));
        return ENOEXEC;
    }

    if (minimum_stack_size.has_value() && minimum_stack_size.value() > stack_size)
        stack_size = minimum_stack_size.value();

    auto* stack_region = TRY(new_space.allocate_region(Memory::RandomizeVirtualAddress::Yes, {}, stack_size, PAGE_SIZE, "Stack (Main thread)"sv, PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
    stack_region->set_stack(true);

    return LoadResult {
        load_base_address,
        elf_image.entry().offset(load_offset).get(),
        executable_size,
        TRY(AK::try_make_weak_ptr_if_nonnull(master_tls_region)),
        master_tls_size,
        master_tls_alignment,
        TRY(stack_region->try_make_weak_ptr())
    };
}
// Loads the new program image into new_space and returns the LoadResult
// describing where the new main thread should start executing.
// - No interpreter (statically linked): load the main program itself,
//   including its master TLS image, and remember the TLS parameters on this
//   Process.
// - With interpreter (dynamically linked): scan the main program only for a
//   PT_GNU_STACK stack-size request, then load just the interpreter; the
//   userspace loader maps the main program and sets up TLS later.
ErrorOr<LoadResult>
Process::load(Memory::AddressSpace& new_space, NonnullRefPtr<OpenFileDescription> main_program_description,
    RefPtr<OpenFileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header)
{
    auto load_offset = TRY(get_load_offset(main_program_header, main_program_description, interpreter_description));

    if (interpreter_description.is_null()) {
        auto load_result = TRY(load_elf_object(new_space, main_program_description, load_offset, ShouldAllocateTls::Yes, ShouldAllowSyscalls::No));
        m_master_tls_region = load_result.tls_region;
        m_master_tls_size = load_result.tls_size;
        m_master_tls_alignment = load_result.tls_alignment;
        return load_result;
    }

    // Honor the main program's PT_GNU_STACK request even though we only load
    // the interpreter into memory here.
    Optional<size_t> requested_main_program_stack_size;
    {
        auto main_program_size = main_program_description->inode()->size();
        auto main_program_rounded_size = TRY(Memory::page_round_up(main_program_size));
        auto main_program_vmobject = TRY(Memory::SharedInodeVMObject::try_create_with_inode(*main_program_description->inode()));
        auto main_program_region = TRY(MM.allocate_kernel_region_with_vmobject(*main_program_vmobject, main_program_rounded_size, "Loaded Main Program ELF"sv, Memory::Region::Access::Read));
        auto main_program_image = ELF::Image(main_program_region->vaddr().as_ptr(), main_program_size);
        if (!main_program_image.is_valid())
            return EINVAL;
        main_program_image.for_each_program_header([&requested_main_program_stack_size](ELF::Image::ProgramHeader const& program_header) {
            if (program_header.type() != PT_GNU_STACK)
                return;
            if (program_header.size_in_memory() == 0)
                return;
            requested_main_program_stack_size = program_header.size_in_memory();
        });
    }

    auto interpreter_load_result = TRY(load_elf_object(new_space, *interpreter_description, load_offset, ShouldAllocateTls::No, ShouldAllowSyscalls::Yes, requested_main_program_stack_size));

    // TLS allocation will be done in userspace by the loader
    VERIFY(!interpreter_load_result.tls_region);
    VERIFY(!interpreter_load_result.tls_alignment);
    VERIFY(!interpreter_load_result.tls_size);

    return interpreter_load_result;
}
  378. void Process::clear_signal_handlers_for_exec()
  379. {
  380. // Comments are as they are presented in the POSIX specification, but slightly out of order.
  381. for (size_t signal = 0; signal < m_signal_action_data.size(); signal++) {
  382. // Except for SIGCHLD, signals set to be ignored by the calling process image shall be set to be ignored by the new process image.
  383. // If the SIGCHLD signal is set to be ignored by the calling process image, it is unspecified whether the SIGCHLD signal is set
  384. // to be ignored or to the default action in the new process image.
  385. if (signal != SIGCHLD && m_signal_action_data[signal].handler_or_sigaction.get() == reinterpret_cast<FlatPtr>(SIG_IGN)) {
  386. m_signal_action_data[signal] = {};
  387. m_signal_action_data[signal].handler_or_sigaction.set(reinterpret_cast<FlatPtr>(SIG_IGN));
  388. continue;
  389. }
  390. // Signals set to the default action in the calling process image shall be set to the default action in the new process image.
  391. // Signals set to be caught by the calling process image shall be set to the default action in the new process image.
  392. m_signal_action_data[signal] = {};
  393. }
  394. }
  395. ErrorOr<void> Process::do_exec(NonnullRefPtr<OpenFileDescription> main_program_description, Vector<NonnullOwnPtr<KString>> arguments, Vector<NonnullOwnPtr<KString>> environment,
  396. RefPtr<OpenFileDescription> interpreter_description, Thread*& new_main_thread, InterruptsState& previous_interrupts_state, const ElfW(Ehdr) & main_program_header)
  397. {
  398. VERIFY(is_user_process());
  399. VERIFY(!Processor::in_critical());
  400. auto main_program_metadata = main_program_description->metadata();
  401. // NOTE: Don't allow running SUID binaries at all if we are in a jail.
  402. TRY(Process::current().jail().with([&](auto const& my_jail) -> ErrorOr<void> {
  403. if (my_jail && (main_program_metadata.is_setuid() || main_program_metadata.is_setgid())) {
  404. return Error::from_errno(EPERM);
  405. }
  406. return {};
  407. }));
  408. // Although we *could* handle a pseudo_path here, trying to execute something that doesn't have
  409. // a custody (e.g. BlockDevice or RandomDevice) is pretty suspicious anyway.
  410. auto path = TRY(main_program_description->original_absolute_path());
  411. dbgln_if(EXEC_DEBUG, "do_exec: {}", path);
  412. auto last_part = path->view().find_last_split_view('/');
  413. auto new_process_name = TRY(KString::try_create(last_part));
  414. auto new_main_thread_name = TRY(new_process_name->try_clone());
  415. auto allocated_space = TRY(Memory::AddressSpace::try_create(*this, nullptr));
  416. OwnPtr<Memory::AddressSpace> old_space;
  417. auto old_master_tls_region = m_master_tls_region;
  418. auto old_master_tls_size = m_master_tls_size;
  419. auto old_master_tls_alignment = m_master_tls_alignment;
  420. auto& new_space = m_space.with([&](auto& space) -> Memory::AddressSpace& {
  421. old_space = move(space);
  422. space = move(allocated_space);
  423. return *space;
  424. });
  425. m_master_tls_region = nullptr;
  426. m_master_tls_size = 0;
  427. m_master_tls_alignment = 0;
  428. ArmedScopeGuard space_guard([&]() {
  429. // If we failed at any point from now on we have to revert back to the old address space
  430. m_space.with([&](auto& space) {
  431. space = old_space.release_nonnull();
  432. });
  433. m_master_tls_region = old_master_tls_region;
  434. m_master_tls_size = old_master_tls_size;
  435. m_master_tls_alignment = old_master_tls_alignment;
  436. Memory::MemoryManager::enter_process_address_space(*this);
  437. });
  438. auto load_result = TRY(load(new_space, main_program_description, interpreter_description, main_program_header));
  439. // NOTE: We don't need the interpreter executable description after this point.
  440. // We destroy it here to prevent it from getting destroyed when we return from this function.
  441. // That's important because when we're returning from this function, we're in a very delicate
  442. // state where we can't block (e.g by trying to acquire a mutex in description teardown.)
  443. bool has_interpreter = interpreter_description;
  444. interpreter_description = nullptr;
  445. auto* signal_trampoline_region = TRY(new_space.allocate_region_with_vmobject(Memory::RandomizeVirtualAddress::Yes, {}, PAGE_SIZE, PAGE_SIZE, g_signal_trampoline_region->vmobject(), 0, "Signal trampoline"sv, PROT_READ | PROT_EXEC, true));
  446. signal_trampoline_region->set_syscall_region(true);
  447. // (For dynamically linked executable) Allocate an FD for passing the main executable to the dynamic loader.
  448. Optional<ScopedDescriptionAllocation> main_program_fd_allocation;
  449. if (has_interpreter)
  450. main_program_fd_allocation = TRY(allocate_fd());
  451. auto old_credentials = this->credentials();
  452. auto new_credentials = old_credentials;
  453. auto old_process_attached_jail = m_attached_jail.with([&](auto& jail) -> RefPtr<Jail> { return jail; });
  454. auto old_scoped_list = m_jail_process_list.with([&](auto& list) -> RefPtr<ProcessList> { return list; });
  455. bool executable_is_setid = false;
  456. if (!(main_program_description->custody()->mount_flags() & MS_NOSUID)) {
  457. auto new_euid = old_credentials->euid();
  458. auto new_egid = old_credentials->egid();
  459. auto new_suid = old_credentials->suid();
  460. auto new_sgid = old_credentials->sgid();
  461. if (main_program_metadata.is_setuid()) {
  462. executable_is_setid = true;
  463. new_euid = main_program_metadata.uid;
  464. new_suid = main_program_metadata.uid;
  465. }
  466. if (main_program_metadata.is_setgid()) {
  467. executable_is_setid = true;
  468. new_egid = main_program_metadata.gid;
  469. new_sgid = main_program_metadata.gid;
  470. }
  471. if (executable_is_setid) {
  472. new_credentials = TRY(Credentials::create(
  473. old_credentials->uid(),
  474. old_credentials->gid(),
  475. new_euid,
  476. new_egid,
  477. new_suid,
  478. new_sgid,
  479. old_credentials->extra_gids(),
  480. old_credentials->sid(),
  481. old_credentials->pgid()));
  482. }
  483. }
  484. // We commit to the new executable at this point. There is no turning back!
  485. space_guard.disarm();
  486. // Prevent other processes from attaching to us with ptrace while we're doing this.
  487. MutexLocker ptrace_locker(ptrace_lock());
  488. // Disable profiling temporarily in case it's running on this process.
  489. auto was_profiling = m_profiling;
  490. TemporaryChange profiling_disabler(m_profiling, false);
  491. kill_threads_except_self();
  492. with_mutable_protected_data([&](auto& protected_data) {
  493. protected_data.credentials = move(new_credentials);
  494. protected_data.dumpable = !executable_is_setid;
  495. protected_data.executable_is_setid = executable_is_setid;
  496. });
  497. m_executable.with([&](auto& executable) { executable = main_program_description->custody(); });
  498. m_arguments = move(arguments);
  499. m_attached_jail.with([&](auto& jail) {
  500. jail = old_process_attached_jail;
  501. });
  502. m_jail_process_list.with([&](auto& list) {
  503. list = old_scoped_list;
  504. });
  505. m_environment = move(environment);
  506. TRY(m_unveil_data.with([&](auto& unveil_data) -> ErrorOr<void> {
  507. TRY(m_exec_unveil_data.with([&](auto& exec_unveil_data) -> ErrorOr<void> {
  508. // Note: If we have exec unveil data being waiting to be dispatched
  509. // to the current execve'd program, then we apply the unveil data and
  510. // ensure it is locked in the new program.
  511. if (exec_unveil_data.state == VeilState::Dropped) {
  512. unveil_data.state = VeilState::LockedInherited;
  513. exec_unveil_data.state = VeilState::None;
  514. unveil_data.paths = TRY(exec_unveil_data.paths.deep_copy());
  515. } else {
  516. unveil_data.state = VeilState::None;
  517. exec_unveil_data.state = VeilState::None;
  518. unveil_data.paths.clear();
  519. unveil_data.paths.set_metadata({ TRY(KString::try_create("/"sv)), UnveilAccess::None, false });
  520. }
  521. exec_unveil_data.paths.clear();
  522. exec_unveil_data.paths.set_metadata({ TRY(KString::try_create("/"sv)), UnveilAccess::None, false });
  523. return {};
  524. }));
  525. return {};
  526. }));
  527. m_coredump_properties.for_each([](auto& property) {
  528. property = {};
  529. });
  530. auto* current_thread = Thread::current();
  531. current_thread->reset_signals_for_exec();
  532. clear_signal_handlers_for_exec();
  533. clear_futex_queues_on_exec();
  534. m_fds.with_exclusive([&](auto& fds) {
  535. fds.change_each([&](auto& file_description_metadata) {
  536. if (file_description_metadata.is_valid() && file_description_metadata.flags() & FD_CLOEXEC)
  537. file_description_metadata = {};
  538. });
  539. });
  540. if (main_program_fd_allocation.has_value()) {
  541. main_program_description->set_readable(true);
  542. m_fds.with_exclusive([&](auto& fds) { fds[main_program_fd_allocation->fd].set(move(main_program_description), FD_CLOEXEC); });
  543. }
  544. new_main_thread = nullptr;
  545. if (&current_thread->process() == this) {
  546. new_main_thread = current_thread;
  547. } else {
  548. for_each_thread([&](auto& thread) {
  549. new_main_thread = &thread;
  550. return IterationDecision::Break;
  551. });
  552. }
  553. VERIFY(new_main_thread);
  554. auto credentials = this->credentials();
  555. auto auxv = generate_auxiliary_vector(load_result.load_base, load_result.entry_eip, credentials->uid(), credentials->euid(), credentials->gid(), credentials->egid(), path->view(), main_program_fd_allocation);
  556. // FIXME: How much stack space does process startup need?
  557. if (!validate_stack_size(m_arguments, m_environment, auxv))
  558. return E2BIG;
  559. // NOTE: We create the new stack before disabling interrupts since it will zero-fault
  560. // and we don't want to deal with faults after this point.
  561. auto new_userspace_sp = TRY(make_userspace_context_for_main_thread(new_main_thread->regs(), *load_result.stack_region.unsafe_ptr(), m_arguments, m_environment, move(auxv)));
  562. set_name(move(new_process_name));
  563. new_main_thread->set_name(move(new_main_thread_name));
  564. if (wait_for_tracer_at_next_execve()) {
  565. // Make sure we release the ptrace lock here or the tracer will block forever.
  566. ptrace_locker.unlock();
  567. Thread::current()->send_urgent_signal_to_self(SIGSTOP);
  568. } else {
  569. // Unlock regardless before disabling interrupts.
  570. // Ensure we always unlock after checking ptrace status to avoid TOCTOU ptrace issues
  571. ptrace_locker.unlock();
  572. }
  573. // We enter a critical section here because we don't want to get interrupted between do_exec()
  574. // and Processor::assume_context() or the next context switch.
  575. // If we used an InterruptDisabler that calls enable_interrupts() on exit, we might timer tick'd too soon in exec().
  576. Processor::enter_critical();
  577. previous_interrupts_state = processor_interrupts_state();
  578. Processor::disable_interrupts();
  579. // NOTE: Be careful to not trigger any page faults below!
  580. with_mutable_protected_data([&](auto& protected_data) {
  581. protected_data.promises = protected_data.execpromises.load();
  582. protected_data.has_promises = protected_data.has_execpromises.load();
  583. protected_data.execpromises = 0;
  584. protected_data.has_execpromises = false;
  585. protected_data.signal_trampoline = signal_trampoline_region->vaddr();
  586. // FIXME: PID/TID ISSUE
  587. protected_data.pid = new_main_thread->tid().value();
  588. });
  589. auto tsr_result = new_main_thread->make_thread_specific_region({});
  590. if (tsr_result.is_error()) {
  591. // FIXME: We cannot fail this late. Refactor this so the allocation happens before we commit to the new executable.
  592. VERIFY_NOT_REACHED();
  593. }
  594. new_main_thread->reset_fpu_state();
  595. auto& regs = new_main_thread->m_regs;
  596. address_space().with([&](auto& space) {
  597. regs.set_exec_state(load_result.entry_eip, new_userspace_sp, *space);
  598. });
  599. {
  600. TemporaryChange profiling_disabler(m_profiling, was_profiling);
  601. PerformanceManager::add_process_exec_event(*this);
  602. }
  603. u32 lock_count_to_restore;
  604. [[maybe_unused]] auto rc = big_lock().force_unlock_exclusive_if_locked(lock_count_to_restore);
  605. VERIFY_INTERRUPTS_DISABLED();
  606. VERIFY(Processor::in_critical());
  607. return {};
  608. }
// Builds the ELF auxiliary vector (auxv) that do_exec() pushes onto the new
// process's initial stack, describing the loaded image and credentials to the
// runtime loader (and to the program itself via getauxval-style lookups).
static Array<ELF::AuxiliaryValue, auxiliary_vector_size> generate_auxiliary_vector(FlatPtr load_base, FlatPtr entry_eip, UserID uid, UserID euid, GroupID gid, GroupID egid, StringView executable_path, Optional<Process::ScopedDescriptionAllocation> const& main_program_fd_allocation)
{
    return { {
        // PHDR/EXECFD
        // PH*
        { ELF::AuxiliaryValue::PageSize, PAGE_SIZE },
        { ELF::AuxiliaryValue::BaseAddress, (void*)load_base },
        { ELF::AuxiliaryValue::Entry, (void*)entry_eip },
        // NOTELF
        { ELF::AuxiliaryValue::Uid, (long)uid.value() },
        { ELF::AuxiliaryValue::EUid, (long)euid.value() },
        { ELF::AuxiliaryValue::Gid, (long)gid.value() },
        { ELF::AuxiliaryValue::EGid, (long)egid.value() },
        { ELF::AuxiliaryValue::Platform, Processor::platform_string() },
        // FIXME: This is platform specific
#if ARCH(X86_64)
        { ELF::AuxiliaryValue::HwCap, (long)CPUID(1).edx() },
#elif ARCH(AARCH64)
        { ELF::AuxiliaryValue::HwCap, (long)0 },
#else
#    error "Unknown architecture"
#endif
        { ELF::AuxiliaryValue::ClockTick, (long)TimeManagement::the().ticks_per_second() },
        // FIXME: Also take into account things like extended filesystem permissions? That's what linux does...
        // Secure (AT_SECURE-style) is set whenever real and effective ids differ,
        // i.e. a set-uid/set-gid style exec.
        { ELF::AuxiliaryValue::Secure, ((uid != euid) || (gid != egid)) ? 1 : 0 },
        { ELF::AuxiliaryValue::Random, nullptr },
        { ELF::AuxiliaryValue::ExecFilename, executable_path },
        // If the main program was opened via a dedicated fd allocation, advertise
        // that fd to the loader; otherwise emit an ignored placeholder entry.
        main_program_fd_allocation.has_value() ? ELF::AuxiliaryValue { ELF::AuxiliaryValue::ExecFileDescriptor, main_program_fd_allocation->fd } : ELF::AuxiliaryValue { ELF::AuxiliaryValue::Ignore, 0L },
        { ELF::AuxiliaryValue::Null, 0L },
    } };
}
  640. static ErrorOr<Vector<NonnullOwnPtr<KString>>> find_shebang_interpreter_for_executable(char const first_page[], size_t nread)
  641. {
  642. int word_start = 2;
  643. size_t word_length = 0;
  644. if (nread > 2 && first_page[0] == '#' && first_page[1] == '!') {
  645. Vector<NonnullOwnPtr<KString>> interpreter_words;
  646. for (size_t i = 2; i < nread; ++i) {
  647. if (first_page[i] == '\n') {
  648. break;
  649. }
  650. if (first_page[i] != ' ') {
  651. ++word_length;
  652. }
  653. if (first_page[i] == ' ') {
  654. if (word_length > 0) {
  655. auto word = TRY(KString::try_create(StringView { &first_page[word_start], word_length }));
  656. TRY(interpreter_words.try_append(move(word)));
  657. }
  658. word_length = 0;
  659. word_start = i + 1;
  660. }
  661. }
  662. if (word_length > 0) {
  663. auto word = TRY(KString::try_create(StringView { &first_page[word_start], word_length }));
  664. TRY(interpreter_words.try_append(move(word)));
  665. }
  666. if (!interpreter_words.is_empty())
  667. return interpreter_words;
  668. }
  669. return ENOEXEC;
  670. }
// Resolves the program interpreter (PT_INTERP, i.e. the dynamic loader) for an
// ELF executable whose header and first page have already been read.
// Returns:
//   - an open description of the interpreter, validated as a plain ELF binary,
//     when the executable requests one;
//   - nullptr when no interpreter is needed (ET_EXEC, or an ET_DYN image that
//     relocates itself, such as /usr/lib/Loader.so);
//   - ENOEXEC for invalid images, ELOOP if the interpreter itself wants an
//     interpreter.
ErrorOr<RefPtr<OpenFileDescription>> Process::find_elf_interpreter_for_executable(StringView path, ElfW(Ehdr) const& main_executable_header, size_t main_executable_header_size, size_t file_size)
{
    // Not using ErrorOr here because we'll want to do the same thing in userspace in the RTLD
    StringBuilder interpreter_path_builder;
    if (!TRY(ELF::validate_program_headers(main_executable_header, file_size, { &main_executable_header, main_executable_header_size }, &interpreter_path_builder))) {
        dbgln("exec({}): File has invalid ELF Program headers", path);
        return ENOEXEC;
    }
    auto interpreter_path = interpreter_path_builder.string_view();

    if (!interpreter_path.is_empty()) {
        dbgln_if(EXEC_DEBUG, "exec({}): Using program interpreter {}", path, interpreter_path);
        auto interpreter_description = TRY(VirtualFileSystem::the().open(credentials(), interpreter_path, O_EXEC, 0, current_directory()));
        auto interp_metadata = interpreter_description->metadata();

        VERIFY(interpreter_description->inode());

        // Validate the program interpreter as a valid elf binary.
        // If your program interpreter is a #! file or something, it's time to stop playing games :)
        if (interp_metadata.size < (int)sizeof(ElfW(Ehdr)))
            return ENOEXEC;

        char first_page[PAGE_SIZE] = {};
        auto first_page_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&first_page);
        auto nread = TRY(interpreter_description->read(first_page_buffer, sizeof(first_page)));

        if (nread < sizeof(ElfW(Ehdr)))
            return ENOEXEC;

        auto* elf_header = (ElfW(Ehdr)*)first_page;
        if (!ELF::validate_elf_header(*elf_header, interp_metadata.size)) {
            dbgln("exec({}): Interpreter ({}) has invalid ELF header", path, interpreter_path);
            return ENOEXEC;
        }

        // Not using ErrorOr here because we'll want to do the same thing in userspace in the RTLD
        StringBuilder interpreter_interpreter_path_builder;
        if (!TRY(ELF::validate_program_headers(*elf_header, interp_metadata.size, { first_page, nread }, &interpreter_interpreter_path_builder))) {
            dbgln("exec({}): Interpreter ({}) has invalid ELF Program headers", path, interpreter_path);
            return ENOEXEC;
        }
        auto interpreter_interpreter_path = interpreter_interpreter_path_builder.string_view();

        // An interpreter that itself requests an interpreter would recurse; refuse it.
        if (!interpreter_interpreter_path.is_empty()) {
            dbgln("exec({}): Interpreter ({}) has its own interpreter ({})! No thank you!", path, interpreter_path, interpreter_interpreter_path);
            return ELOOP;
        }

        return interpreter_description;
    }

    if (main_executable_header.e_type == ET_REL) {
        // We can't exec an ET_REL, that's just an object file from the compiler
        return ENOEXEC;
    }
    if (main_executable_header.e_type == ET_DYN) {
        // If it's ET_DYN with no PT_INTERP, then it's a dynamic executable responsible
        // for its own relocation (i.e. it's /usr/lib/Loader.so)
        if (path != "/usr/lib/Loader.so")
            dbgln("exec({}): WARNING - Dynamic ELF executable without a PT_INTERP header, and isn't /usr/lib/Loader.so", path);
        return nullptr;
    }

    // No interpreter, but, path refers to a valid elf image
    return nullptr;
}
  726. ErrorOr<void> Process::exec(NonnullOwnPtr<KString> path, Vector<NonnullOwnPtr<KString>> arguments, Vector<NonnullOwnPtr<KString>> environment, Thread*& new_main_thread, InterruptsState& previous_interrupts_state, int recursion_depth)
  727. {
  728. if (recursion_depth > 2) {
  729. dbgln("exec({}): SHENANIGANS! recursed too far trying to find #! interpreter", path);
  730. return ELOOP;
  731. }
  732. // Open the file to check what kind of binary format it is
  733. // Currently supported formats:
  734. // - #! interpreted file
  735. // - ELF32
  736. // * ET_EXEC binary that just gets loaded
  737. // * ET_DYN binary that requires a program interpreter
  738. //
  739. auto description = TRY(VirtualFileSystem::the().open(credentials(), path->view(), O_EXEC, 0, current_directory()));
  740. auto metadata = description->metadata();
  741. if (!metadata.is_regular_file())
  742. return EACCES;
  743. // Always gonna need at least 3 bytes. these are for #!X
  744. if (metadata.size < 3)
  745. return ENOEXEC;
  746. VERIFY(description->inode());
  747. // Read the first page of the program into memory so we can validate the binfmt of it
  748. char first_page[PAGE_SIZE];
  749. auto first_page_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&first_page);
  750. auto nread = TRY(description->read(first_page_buffer, sizeof(first_page)));
  751. // 1) #! interpreted file
  752. auto shebang_result = find_shebang_interpreter_for_executable(first_page, nread);
  753. if (!shebang_result.is_error()) {
  754. auto shebang_words = shebang_result.release_value();
  755. auto shebang_path = TRY(shebang_words.first()->try_clone());
  756. arguments[0] = move(path);
  757. TRY(arguments.try_prepend(move(shebang_words)));
  758. return exec(move(shebang_path), move(arguments), move(environment), new_main_thread, previous_interrupts_state, ++recursion_depth);
  759. }
  760. // #2) ELF32 for i386
  761. if (nread < sizeof(ElfW(Ehdr)))
  762. return ENOEXEC;
  763. auto const* main_program_header = (ElfW(Ehdr)*)first_page;
  764. if (!ELF::validate_elf_header(*main_program_header, metadata.size)) {
  765. dbgln("exec({}): File has invalid ELF header", path);
  766. return ENOEXEC;
  767. }
  768. auto interpreter_description = TRY(find_elf_interpreter_for_executable(path->view(), *main_program_header, nread, metadata.size));
  769. return do_exec(move(description), move(arguments), move(environment), move(interpreter_description), new_main_thread, previous_interrupts_state, *main_program_header);
  770. }
// The execve(2) syscall entry point: copies path/argv/envp from userspace,
// performs the exec, and — on success — context-switches into the new image
// without ever returning normally. The interrupt/critical-section dance at
// the end is exactly ordered; do not reorder statements here.
ErrorOr<FlatPtr> Process::sys$execve(Userspace<Syscall::SC_execve_params const*> user_params)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
    TRY(require_promise(Pledge::exec));

    Thread* new_main_thread = nullptr;
    InterruptsState previous_interrupts_state = InterruptsState::Enabled;

    // NOTE: Be extremely careful with allocating any kernel memory in this function.
    //       On success, the kernel stack will be lost.
    //       The explicit block scope below is specifically placed to minimize the number
    //       of stack locals in this function.
    {
        auto params = TRY(copy_typed_from_user(user_params));

        if (params.arguments.length > ARG_MAX || params.environment.length > ARG_MAX)
            return E2BIG;

        // NOTE: The caller is expected to always pass at least one argument by convention,
        //       the program path that was passed as params.path.
        if (params.arguments.length == 0)
            return EINVAL;

        auto path = TRY(get_syscall_path_argument(params.path));

        // Copy a userspace string-pointer table into a kernel Vector of KStrings,
        // with overflow-checked sizing of the intermediate pointer array.
        auto copy_user_strings = [](auto const& list, auto& output) -> ErrorOr<void> {
            if (!list.length)
                return {};
            Checked<size_t> size = sizeof(*list.strings);
            size *= list.length;
            if (size.has_overflow())
                return EOVERFLOW;
            Vector<Syscall::StringArgument, 32> strings;
            TRY(strings.try_resize(list.length));
            TRY(copy_from_user(strings.data(), list.strings, size.value()));
            for (size_t i = 0; i < list.length; ++i) {
                auto string = TRY(try_copy_kstring_from_user(strings[i]));
                TRY(output.try_append(move(string)));
            }
            return {};
        };

        Vector<NonnullOwnPtr<KString>> arguments;
        TRY(copy_user_strings(params.arguments, arguments));

        Vector<NonnullOwnPtr<KString>> environment;
        TRY(copy_user_strings(params.environment, environment));

        TRY(exec(move(path), move(arguments), move(environment), new_main_thread, previous_interrupts_state));
    }

    // NOTE: If we're here, the exec has succeeded and we've got a new executable image!
    //       We will not return normally from this function. Instead, the next time we
    //       get scheduled, it'll be at the entry point of the new executable.

    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(Processor::in_critical());

    auto* current_thread = Thread::current();
    if (current_thread == new_main_thread) {
        // We need to enter the scheduler lock before changing the state
        // and it will be released after the context switch into that
        // thread. We should also still be in our critical section
        VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
        VERIFY(Processor::in_critical() == 1);
        g_scheduler_lock.lock();
        current_thread->set_state(Thread::State::Running);
        Processor::assume_context(*current_thread, previous_interrupts_state);
        VERIFY_NOT_REACHED();
    }

    // NOTE: This code path is taken in the non-syscall case, i.e when the kernel spawns
    //       a userspace process directly (such as /bin/SystemServer on startup)
    restore_processor_interrupts_state(previous_interrupts_state);
    Processor::leave_critical();
    return 0;
}
  835. }