// mmap.cpp

/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/WeakPtr.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/Process.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/PrivateInodeVMObject.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/SharedInodeVMObject.h>
#include <LibC/limits.h>
#include <LibELF/Validation.h>

namespace Kernel {
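
// Decide whether a protection change that would normally violate W^X may be allowed
// because it matches the dynamic loader's RW -> RX transition on a private,
// file-backed mapping of an ET_DYN ELF object.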
static bool should_make_executable_exception_for_dynamic_loader(bool make_readable, bool make_writable, bool make_executable, const Region& region)
{
    // Normally we don't allow W -> X transitions, but we have to make an exception
    // for the dynamic loader, which needs to do this after performing text relocations.

    // FIXME: Investigate whether we could get rid of all text relocations entirely.

    // The exception is only made if all the following criteria are fulfilled:

    // The region must be RW
    if (!(region.is_readable() && region.is_writable() && !region.is_executable()))
        return false;

    // The region wants to become RX
    if (!(make_readable && !make_writable && make_executable))
        return false;

    // The region is backed by a file
    if (!region.vmobject().is_inode())
        return false;

    // The file mapping is private, not shared (no relocations in a shared mapping!)
    if (!region.vmobject().is_private_inode())
        return false;

    auto& inode_vm = static_cast<const InodeVMObject&>(region.vmobject());
    auto& inode = inode_vm.inode();

    Elf32_Ehdr header;
    auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&header);
    auto nread = inode.read_bytes(0, sizeof(header), buffer, nullptr);
    if (nread != sizeof(header))
        return false;

    // The file is a valid ELF binary
    if (!ELF::validate_elf_header(header, inode.size()))
        return false;

    // The file is an ELF shared object
    if (header.e_type != ET_DYN)
        return false;

    // FIXME: Are there any additional checks/validations we could do here?
    return true;
}
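
// Enforce the kernel's W^X policy for an mmap()/mprotect() protection request:
// anonymous executable mappings, simultaneous write+execute, and executable or
// non-read/write stacks are rejected. For an existing region, W -> X and X -> W
// transitions are also refused, except for the dynamic loader case above.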
static bool validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, const Region* region = nullptr)
{
    bool make_readable = prot & PROT_READ;
    bool make_writable = prot & PROT_WRITE;
    bool make_executable = prot & PROT_EXEC;

    if (map_anonymous && make_executable)
        return false;

    if (make_writable && make_executable)
        return false;

    if (map_stack) {
        if (make_executable)
            return false;
        if (!make_readable || !make_writable)
            return false;
    }

    if (region) {
        if (make_writable && region->has_been_executable())
            return false;

        if (make_executable && region->has_been_writable()) {
            if (should_make_executable_exception_for_dynamic_loader(make_readable, make_writable, make_executable, *region))
                return true;

            return false;
        }
    }

    return true;
}
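
// Check that the requested protection is permitted for a file-backed mapping:
// the caller must be able to read the inode for PROT_READ, and for shared
// mappings also write it for PROT_WRITE. A shared inode VMObject may not gain
// executable mappings while it has writable ones, or vice versa.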
static bool validate_inode_mmap_prot(const Process& process, int prot, const Inode& inode, bool map_shared)
{
    auto metadata = inode.metadata();
    if ((prot & PROT_READ) && !metadata.may_read(process))
        return false;

    if (map_shared) {
        // FIXME: What about readonly filesystem mounts? We cannot make a
        //        decision here without knowing the mount flags, so we would need to
        //        keep a Custody or something from mmap time.
        if ((prot & PROT_WRITE) && !metadata.may_write(process))
            return false;
        InterruptDisabler disabler;
        if (auto shared_vmobject = inode.shared_vmobject()) {
            if ((prot & PROT_EXEC) && shared_vmobject->writable_mappings())
                return false;
            if ((prot & PROT_WRITE) && shared_vmobject->executable_mappings())
                return false;
        }
    }
    return true;
}
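
// sys$mmap: map a new region into the calling process's address space.
// Validates the user-supplied parameters and flags, allocates a virtual
// address range (honoring MAP_FIXED / MAP_RANDOMIZED / address hints), then
// backs it with either anonymous memory or a file mapping obtained from the
// file description.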
KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
{
    REQUIRE_PROMISE(stdio);
    Syscall::SC_mmap_params params;
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    FlatPtr addr = params.addr;
    auto size = params.size;
    auto alignment = params.alignment;
    auto prot = params.prot;
    auto flags = params.flags;
    auto fd = params.fd;
    auto offset = params.offset;

    if (prot & PROT_EXEC) {
        REQUIRE_PROMISE(prot_exec);
    }

    if (flags & MAP_FIXED) {
        REQUIRE_PROMISE(map_fixed);
    }

    if (alignment & ~PAGE_MASK)
        return EINVAL;

    if (page_round_up_would_wrap(size))
        return EINVAL;

    if (!is_user_range(VirtualAddress(addr), page_round_up(size)))
        return EFAULT;

    String name;
    if (params.name.characters) {
        if (params.name.length > PATH_MAX)
            return ENAMETOOLONG;
        name = copy_string_from_user(params.name);
        if (name.is_null())
            return EFAULT;
    }

    if (size == 0)
        return EINVAL;
    if ((FlatPtr)addr & ~PAGE_MASK)
        return EINVAL;

    bool map_shared = flags & MAP_SHARED;
    bool map_anonymous = flags & MAP_ANONYMOUS;
    bool map_private = flags & MAP_PRIVATE;
    bool map_stack = flags & MAP_STACK;
    bool map_fixed = flags & MAP_FIXED;
    bool map_noreserve = flags & MAP_NORESERVE;
    bool map_randomized = flags & MAP_RANDOMIZED;

    if (map_shared && map_private)
        return EINVAL;

    if (!map_shared && !map_private)
        return EINVAL;

    if (map_fixed && map_randomized)
        return EINVAL;

    if (!validate_mmap_prot(prot, map_stack, map_anonymous))
        return EINVAL;

    if (map_stack && (!map_private || !map_anonymous))
        return EINVAL;

    Region* region = nullptr;
    Optional<Range> range;

    if (map_randomized) {
        range = space().page_directory().range_allocator().allocate_randomized(page_round_up(size), alignment);
    } else {
        range = space().allocate_range(VirtualAddress(addr), size, alignment);
        if (!range.has_value()) {
            if (addr && !map_fixed) {
                // If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
                range = space().allocate_range({}, size, alignment);
            }
        }
    }

    if (!range.has_value())
        return ENOMEM;

    if (map_anonymous) {
        auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
        auto region_or_error = space().allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, strategy);
        if (region_or_error.is_error())
            return region_or_error.error().error();
        region = region_or_error.value();
    } else {
        if (offset < 0)
            return EINVAL;
        if (static_cast<size_t>(offset) & ~PAGE_MASK)
            return EINVAL;
        auto description = file_description(fd);
        if (!description)
            return EBADF;
        if (description->is_directory())
            return ENODEV;
        // Require read access even when read protection is not requested.
        if (!description->is_readable())
            return EACCES;
        if (map_shared) {
            if ((prot & PROT_WRITE) && !description->is_writable())
                return EACCES;
        }
        if (description->inode()) {
            if (!validate_inode_mmap_prot(*this, prot, *description->inode(), map_shared))
                return EACCES;
        }

        auto region_or_error = description->mmap(*this, range.value(), static_cast<u64>(offset), prot, map_shared);
        if (region_or_error.is_error())
            return region_or_error.error().error();
        region = region_or_error.value();
    }

    if (!region)
        return ENOMEM;

    if (auto* event_buffer = current_perf_events_buffer()) {
        [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MMAP, region->vaddr().get(),
            region->size(), name.is_null() ? region->name() : name);
    }

    region->set_mmap(true);
    if (map_shared)
        region->set_shared(true);
    if (map_stack)
        region->set_stack(true);
    if (!name.is_null())
        region->set_name(name);
    return region->vaddr().get();
}
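
// Round a user-supplied (address, size) pair outward to page boundaries,
// rejecting ranges that would overflow the address space.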
static KResultOr<Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
{
    if (page_round_up_would_wrap(size))
        return EINVAL;

    if ((address + size) < address)
        return EINVAL;

    if (page_round_up_would_wrap(address + size))
        return EINVAL;

    auto base = VirtualAddress { address }.page_base();
    auto end = page_round_up(address + size);

    return Range { base, end - base.get() };
}
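
// sys$mprotect: change the protection bits of an existing mmap()ed range.
// The fast path handles a range that exactly matches one region; otherwise the
// containing region is split and only the middle piece gets the new protection.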
KResultOr<int> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)
{
    REQUIRE_PROMISE(stdio);

    if (prot & PROT_EXEC) {
        REQUIRE_PROMISE(prot_exec);
    }

    auto range_or_error = expand_range_to_page_boundaries(addr, size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range_to_mprotect = range_or_error.value();
    if (!range_to_mprotect.size())
        return EINVAL;

    if (!is_user_range(range_to_mprotect))
        return EFAULT;

    if (auto* whole_region = space().find_region_from_range(range_to_mprotect)) {
        if (!whole_region->is_mmap())
            return EPERM;
        if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
            return EINVAL;
        if (whole_region->access() == prot_to_region_access_flags(prot))
            return 0;
        if (whole_region->vmobject().is_inode()
            && !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(whole_region->vmobject()).inode(), whole_region->is_shared())) {
            return EACCES;
        }
        whole_region->set_readable(prot & PROT_READ);
        whole_region->set_writable(prot & PROT_WRITE);
        whole_region->set_executable(prot & PROT_EXEC);
        whole_region->remap();
        return 0;
    }

    // Check if we can carve out the desired range from an existing region
    if (auto* old_region = space().find_region_containing(range_to_mprotect)) {
        if (!old_region->is_mmap())
            return EPERM;
        if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
            return EINVAL;
        if (old_region->access() == prot_to_region_access_flags(prot))
            return 0;
        if (old_region->vmobject().is_inode()
            && !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(old_region->vmobject()).inode(), old_region->is_shared())) {
            return EACCES;
        }

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but don't deallocate it yet.
        auto region = space().take_region(*old_region);
        VERIFY(region);

        // Unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

        // This vector is the region(s) adjacent to our range.
        // We need to allocate a new region for the range we wanted to change permission bits on.
        auto adjacent_regions = space().split_region_around_range(*region, range_to_mprotect);

        size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_mprotect.base().get() - region->range().base().get());
        auto& new_region = space().allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
        new_region.set_readable(prot & PROT_READ);
        new_region.set_writable(prot & PROT_WRITE);
        new_region.set_executable(prot & PROT_EXEC);

        // Map the new regions using our page directory (they were just allocated and don't have one).
        for (auto* adjacent_region : adjacent_regions) {
            adjacent_region->map(space().page_directory());
        }
        new_region.map(space().page_directory());
        return 0;
    }

    // FIXME: We should also support mprotect() across multiple regions. (#175) (#964)
    return EINVAL;
}
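
// sys$madvise: apply memory advice to an anonymous mmap()ed region. Only the
// volatility-related advice values are handled here: MADV_SET_VOLATILE,
// MADV_SET_NONVOLATILE (which reports whether pages were purged), and
// MADV_GET_VOLATILE.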
KResultOr<int> Process::sys$madvise(Userspace<void*> address, size_t size, int advice)
{
    REQUIRE_PROMISE(stdio);

    auto range_or_error = expand_range_to_page_boundaries(address, size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range_to_madvise = range_or_error.value();
    if (!range_to_madvise.size())
        return EINVAL;

    if (!is_user_range(range_to_madvise))
        return EFAULT;

    auto* region = space().find_region_from_range(range_to_madvise);
    if (!region)
        return EINVAL;
    if (!region->is_mmap())
        return EPERM;
    bool set_volatile = advice & MADV_SET_VOLATILE;
    bool set_nonvolatile = advice & MADV_SET_NONVOLATILE;
    if (set_volatile && set_nonvolatile)
        return EINVAL;
    if (set_volatile || set_nonvolatile) {
        if (!region->vmobject().is_anonymous())
            return EPERM;
        bool was_purged = false;
        switch (region->set_volatile(VirtualAddress(address), size, set_volatile, was_purged)) {
        case Region::SetVolatileError::Success:
            break;
        case Region::SetVolatileError::NotPurgeable:
            return EPERM;
        case Region::SetVolatileError::OutOfMemory:
            return ENOMEM;
        }
        if (set_nonvolatile)
            return was_purged ? 1 : 0;
        return 0;
    }
    if (advice & MADV_GET_VOLATILE) {
        if (!region->vmobject().is_anonymous())
            return EPERM;
        return region->is_volatile(VirtualAddress(address), size) ? 0 : 1;
    }
    return EINVAL;
}
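
// sys$set_mmap_name: give an existing mmap()ed region a new name, after copying
// and validating the name from userspace. The rename is also recorded in the
// performance event buffer, if one is active.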
KResultOr<int> Process::sys$set_mmap_name(Userspace<const Syscall::SC_set_mmap_name_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_set_mmap_name_params params;
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    if (params.name.length > PATH_MAX)
        return ENAMETOOLONG;

    auto name = copy_string_from_user(params.name);
    if (name.is_null())
        return EFAULT;

    auto range_or_error = expand_range_to_page_boundaries((FlatPtr)params.addr, params.size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range = range_or_error.value();

    auto* region = space().find_region_from_range(range);
    if (!region)
        return EINVAL;
    if (!region->is_mmap())
        return EPERM;

    if (auto* event_buffer = current_perf_events_buffer()) {
        [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MMAP, region->vaddr().get(), region->size(), name.characters());
    }
    region->set_name(move(name));
    return 0;
}
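
// sys$munmap: unmap a range of the address space. Handles three cases: the
// range exactly matches one region, the range is carved out of the middle of a
// single region (which is split), or the range intersects several regions.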
KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
{
    REQUIRE_PROMISE(stdio);

    if (!size)
        return EINVAL;

    auto range_or_error = expand_range_to_page_boundaries(addr, size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range_to_unmap = range_or_error.value();

    if (!is_user_range(range_to_unmap))
        return EFAULT;

    if (auto* whole_region = space().find_region_from_range(range_to_unmap)) {
        if (!whole_region->is_mmap())
            return EPERM;

        auto base = whole_region->vaddr();
        auto size = whole_region->size();
        bool success = space().deallocate_region(*whole_region);
        VERIFY(success);

        if (auto* event_buffer = current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, base.get(), size, nullptr);
        }
        return 0;
    }

    if (auto* old_region = space().find_region_containing(range_to_unmap)) {
        if (!old_region->is_mmap())
            return EPERM;

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but don't deallocate it yet.
        auto region = space().take_region(*old_region);
        VERIFY(region);

        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

        auto new_regions = space().split_region_around_range(*region, range_to_unmap);

        // Instead we give back the unwanted VM manually.
        space().page_directory().range_allocator().deallocate(range_to_unmap);

        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
        for (auto* new_region : new_regions) {
            new_region->map(space().page_directory());
        }

        if (auto* event_buffer = current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, range_to_unmap.base().get(), range_to_unmap.size(), nullptr);
        }
        return 0;
    }

    // Try again while checking multiple regions at a time.
    // Slow: without caching.
    const auto& regions = space().find_regions_intersecting(range_to_unmap);

    // Check if any of the regions is not mmap'ed, to not accidentally
    // error out with just half a region map left.
    for (auto* region : regions) {
        if (!region->is_mmap())
            return EPERM;
    }

    Vector<Region*, 2> new_regions;

    for (auto* old_region : regions) {
        // If it's a full match we can delete the complete old region.
        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
            bool res = space().deallocate_region(*old_region);
            VERIFY(res);
            continue;
        }

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but don't deallocate it yet.
        auto region = space().take_region(*old_region);
        VERIFY(region);

        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

        // Otherwise just split the regions and collect them for future mapping.
        if (!new_regions.try_append(space().split_region_around_range(*region, range_to_unmap)))
            return ENOMEM;
    }

    // Instead we give back the unwanted VM manually at the end.
    space().page_directory().range_allocator().deallocate(range_to_unmap);

    // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
    for (auto* new_region : new_regions) {
        new_region->map(space().page_directory());
    }

    if (auto* event_buffer = current_perf_events_buffer()) {
        [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, range_to_unmap.base().get(), range_to_unmap.size(), nullptr);
    }

    return 0;
}
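
// sys$mremap: currently only handles one transformation, namely converting a
// shared file-backed mapping into a private mapping of the same inode, keeping
// the same range, name, protection, and offset; all other requests return ENOTIMPL.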
KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_mremap_params params {};
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    auto range_or_error = expand_range_to_page_boundaries((FlatPtr)params.old_address, params.old_size);
    if (range_or_error.is_error())
        return range_or_error.error().error();

    auto old_range = range_or_error.value();

    auto* old_region = space().find_region_from_range(old_range);
    if (!old_region)
        return EINVAL;

    if (!old_region->is_mmap())
        return EPERM;

    if (old_region->vmobject().is_shared_inode() && params.flags & MAP_PRIVATE && !(params.flags & (MAP_ANONYMOUS | MAP_NORESERVE))) {
        auto range = old_region->range();
        auto old_name = old_region->name();
        auto old_prot = region_access_flags_to_prot(old_region->access());
        auto old_offset = old_region->offset_in_vmobject();
        NonnullRefPtr inode = static_cast<SharedInodeVMObject&>(old_region->vmobject()).inode();

        // Unmap without deallocating the VM range since we're going to reuse it.
        old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
        bool success = space().deallocate_region(*old_region);
        VERIFY(success);

        auto new_vmobject = PrivateInodeVMObject::create_with_inode(inode);

        auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject, old_offset, old_name, old_prot, false);
        if (new_region_or_error.is_error())
            return new_region_or_error.error().error();
        auto& new_region = *new_region_or_error.value();
        new_region.set_mmap(true);
        return new_region.vaddr().get();
    }

    dbgln("sys$mremap: Unimplemented remap request (flags={})", params.flags);
    return ENOTIMPL;
}
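
// sys$allocate_tls: allocate the master TLS region for the process. This is
// only allowed once, and only while the process still has a single thread; the
// main thread's TLS descriptor is updated to point at its thread-specific data.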
KResultOr<FlatPtr> Process::sys$allocate_tls(size_t size)
{
    REQUIRE_PROMISE(stdio);

    if (!size)
        return EINVAL;

    if (!m_master_tls_region.is_null())
        return EEXIST;

    if (thread_count() != 1)
        return EFAULT;

    Thread* main_thread = nullptr;
    for_each_thread([&main_thread](auto& thread) {
        main_thread = &thread;
        return IterationDecision::Break;
    });
    VERIFY(main_thread);

    auto range = space().allocate_range({}, size);
    if (!range.has_value())
        return ENOMEM;

    auto region_or_error = space().allocate_region(range.value(), String(), PROT_READ | PROT_WRITE);
    if (region_or_error.is_error())
        return region_or_error.error().error();

    m_master_tls_region = region_or_error.value()->make_weak_ptr();
    m_master_tls_size = size;
    m_master_tls_alignment = PAGE_SIZE;

    auto tsr_result = main_thread->make_thread_specific_region({});
    if (tsr_result.is_error())
        return EFAULT;

    auto& tls_descriptor = Processor::current().get_gdt_entry(GDT_SELECTOR_TLS);
    tls_descriptor.set_base(main_thread->thread_specific_data());
    tls_descriptor.set_limit(main_thread->thread_specific_region_size());

    return m_master_tls_region.unsafe_ptr()->vaddr().get();
}
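
// sys$msyscall: mark the region containing the given address as a syscall
// region. Passing a null address turns on enforcement of syscall regions for
// this address space; once enforcement is on, no further regions can be marked.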
KResultOr<int> Process::sys$msyscall(Userspace<void*> address)
{
    if (space().enforces_syscall_regions())
        return EPERM;

    if (!address) {
        space().set_enforces_syscall_regions(true);
        return 0;
    }

    if (!is_user_address(VirtualAddress { address }))
        return EFAULT;

    auto* region = space().find_region_containing(Range { VirtualAddress { address }, 1 });
    if (!region)
        return EINVAL;

    if (!region->is_mmap())
        return EINVAL;

    region->set_syscall_region(true);
    return 0;
}

}