/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/PrivateInodeVMObject.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/SharedInodeVMObject.h>
#include <LibC/limits.h>
#include <LibELF/Validation.h>

namespace Kernel {

static bool should_make_executable_exception_for_dynamic_loader(bool make_readable, bool make_writable, bool make_executable, const Region& region)
{
    // Normally we don't allow W -> X transitions, but we have to make an exception
    // for the dynamic loader, which needs to do this after performing text relocations.

    // FIXME: Investigate whether we could get rid of all text relocations entirely.

    // The exception is only made if all of the following criteria are fulfilled:

    // The region must be RW
    if (!(region.is_readable() && region.is_writable() && !region.is_executable()))
        return false;

    // The region wants to become RX
    if (!(make_readable && !make_writable && make_executable))
        return false;

    // The region is backed by a file
    if (!region.vmobject().is_inode())
        return false;

    // The file mapping is private, not shared (no relocations in a shared mapping!)
    if (!region.vmobject().is_private_inode())
        return false;

    auto& inode_vm = static_cast<const InodeVMObject&>(region.vmobject());
    auto& inode = inode_vm.inode();

    Elf32_Ehdr header;
    auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&header);
    auto result = inode.read_bytes(0, sizeof(header), buffer, nullptr);
    if (result.is_error() || result.value() != sizeof(header))
        return false;

    // The file is a valid ELF binary
    if (!ELF::validate_elf_header(header, inode.size()))
        return false;

    // The file is an ELF shared object
    if (header.e_type != ET_DYN)
        return false;

    // FIXME: Are there any additional checks/validations we could do here?
    return true;
}
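
// Validate the requested protection bits against our W^X policy:
// anonymous mappings may never be executable, nothing may be writable
// and executable at the same time, and stack mappings must be exactly
// readable+writable. When an existing Region is passed in (the mprotect()
// path), we also reject transitions that would let a region be writable
// at one point in its life and executable at another, except for the
// dynamic loader case handled above.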
static bool validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, const Region* region = nullptr)
{
    bool make_readable = prot & PROT_READ;
    bool make_writable = prot & PROT_WRITE;
    bool make_executable = prot & PROT_EXEC;

    if (map_anonymous && make_executable)
        return false;

    if (make_writable && make_executable)
        return false;

    if (map_stack) {
        if (make_executable)
            return false;
        if (!make_readable || !make_writable)
            return false;
    }

    if (region) {
        if (make_writable && region->has_been_executable())
            return false;

        if (make_executable && region->has_been_writable()) {
            if (should_make_executable_exception_for_dynamic_loader(make_readable, make_writable, make_executable, *region))
                return true;

            return false;
        }
    }

    return true;
}
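
// Check the requested protection against the underlying inode: mapping
// for read requires read permission on the file, and a shared writable
// mapping requires write permission. For shared mappings we also refuse
// to mix writable and executable mappings of the same inode anywhere in
// the system.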
static bool validate_inode_mmap_prot(const Process& process, int prot, const Inode& inode, bool map_shared)
{
    auto metadata = inode.metadata();
    if ((prot & PROT_READ) && !metadata.may_read(process))
        return false;

    if (map_shared) {
        // FIXME: What about readonly filesystem mounts? We cannot make a
        //        decision here without knowing the mount flags, so we would
        //        need to keep a Custody or something from mmap time.
        if ((prot & PROT_WRITE) && !metadata.may_write(process))
            return false;
        InterruptDisabler disabler;
        if (auto shared_vmobject = inode.shared_vmobject()) {
            if ((prot & PROT_EXEC) && shared_vmobject->writable_mappings())
                return false;
            if ((prot & PROT_WRITE) && shared_vmobject->executable_mappings())
                return false;
        }
    }
    return true;
}
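
// Create a new memory mapping, backed either by anonymous memory or by a
// FileDescription. A typical userspace call for an anonymous read/write
// mapping looks like this:
//
//     void* ptr = mmap(nullptr, 0x1000, PROT_READ | PROT_WRITE,
//                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
//
// LibC packs those arguments (plus the optional alignment and region name)
// into an SC_mmap_params struct and passes a pointer to it here.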
KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_mmap_params params;
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    FlatPtr addr = params.addr;
    auto size = params.size;
    auto alignment = params.alignment;
    auto prot = params.prot;
    auto flags = params.flags;
    auto fd = params.fd;
    auto offset = params.offset;

    if (prot & PROT_EXEC) {
        REQUIRE_PROMISE(prot_exec);
    }

    if (flags & MAP_FIXED) {
        REQUIRE_PROMISE(map_fixed);
    }

    if (alignment & ~PAGE_MASK)
        return EINVAL;

    if (page_round_up_would_wrap(size))
        return EINVAL;

    if (!is_user_range(VirtualAddress(addr), page_round_up(size)))
        return EFAULT;

    String name;
    if (params.name.characters) {
        if (params.name.length > PATH_MAX)
            return ENAMETOOLONG;
        name = copy_string_from_user(params.name);
        if (name.is_null())
            return EFAULT;
    }

    if (size == 0)
        return EINVAL;
    if ((FlatPtr)addr & ~PAGE_MASK)
        return EINVAL;

    bool map_shared = flags & MAP_SHARED;
    bool map_anonymous = flags & MAP_ANONYMOUS;
    bool map_private = flags & MAP_PRIVATE;
    bool map_stack = flags & MAP_STACK;
    bool map_fixed = flags & MAP_FIXED;
    bool map_noreserve = flags & MAP_NORESERVE;
    bool map_randomized = flags & MAP_RANDOMIZED;

    if (map_shared && map_private)
        return EINVAL;

    if (!map_shared && !map_private)
        return EINVAL;

    if (map_fixed && map_randomized)
        return EINVAL;

    if (!validate_mmap_prot(prot, map_stack, map_anonymous))
        return EINVAL;

    if (map_stack && (!map_private || !map_anonymous))
        return EINVAL;

    Region* region = nullptr;
    Optional<Range> range;

    if (map_randomized) {
        range = space().page_directory().range_allocator().allocate_randomized(page_round_up(size), alignment);
    } else {
        range = space().allocate_range(VirtualAddress(addr), size, alignment);
        if (!range.has_value()) {
            if (addr && !map_fixed) {
                // If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
                range = space().allocate_range({}, size, alignment);
            }
        }
    }

    if (!range.has_value())
        return ENOMEM;

    if (map_anonymous) {
        auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
        auto region_or_error = space().allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, strategy);
        if (region_or_error.is_error())
            return region_or_error.error().error();
        region = region_or_error.value();
    } else {
        if (offset < 0)
            return EINVAL;
        if (static_cast<size_t>(offset) & ~PAGE_MASK)
            return EINVAL;
        auto description = file_description(fd);
        if (!description)
            return EBADF;
        if (description->is_directory())
            return ENODEV;
        // Require read access even when read protection is not requested.
        if (!description->is_readable())
            return EACCES;
        if (map_shared) {
            if ((prot & PROT_WRITE) && !description->is_writable())
                return EACCES;
        }
        if (description->inode()) {
            if (!validate_inode_mmap_prot(*this, prot, *description->inode(), map_shared))
                return EACCES;
        }
        auto region_or_error = description->mmap(*this, range.value(), static_cast<u64>(offset), prot, map_shared);
        if (region_or_error.is_error())
            return region_or_error.error().error();
        region = region_or_error.value();
    }

    if (!region)
        return ENOMEM;

    region->set_mmap(true);
    if (map_shared)
        region->set_shared(true);
    if (map_stack)
        region->set_stack(true);
    if (!name.is_null())
        region->set_name(name);

    PerformanceManager::add_mmap_perf_event(*this, *region);

    return region->vaddr().get();
}
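
// Round an arbitrary (address, size) pair outward to page boundaries,
// rejecting ranges whose end would overflow. For example, with 4 KiB
// pages, (0x1234, 0x100) expands to the single-page range at 0x1000.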
static KResultOr<Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
{
    if (page_round_up_would_wrap(size))
        return EINVAL;

    if ((address + size) < address)
        return EINVAL;

    if (page_round_up_would_wrap(address + size))
        return EINVAL;

    auto base = VirtualAddress { address }.page_base();
    auto end = page_round_up(address + size);

    return Range { base, end - base.get() };
}
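
// Change the protection bits on an existing mmap() range. The easy case is
// a range that exactly matches one region; failing that, we carve the range
// out of a single containing region by splitting it. Ranges spanning
// multiple regions are not supported yet (see the FIXME below).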
KResultOr<int> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)
{
    REQUIRE_PROMISE(stdio);

    if (prot & PROT_EXEC) {
        REQUIRE_PROMISE(prot_exec);
    }

    auto range_or_error = expand_range_to_page_boundaries(addr, size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range_to_mprotect = range_or_error.value();
    if (!range_to_mprotect.size())
        return EINVAL;

    if (!is_user_range(range_to_mprotect))
        return EFAULT;

    if (auto* whole_region = space().find_region_from_range(range_to_mprotect)) {
        if (!whole_region->is_mmap())
            return EPERM;
        if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
            return EINVAL;
        if (whole_region->access() == prot_to_region_access_flags(prot))
            return 0;
        if (whole_region->vmobject().is_inode()
            && !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(whole_region->vmobject()).inode(), whole_region->is_shared())) {
            return EACCES;
        }
        whole_region->set_readable(prot & PROT_READ);
        whole_region->set_writable(prot & PROT_WRITE);
        whole_region->set_executable(prot & PROT_EXEC);

        whole_region->remap();
        return 0;
    }

    // Check if we can carve out the desired range from an existing region
    if (auto* old_region = space().find_region_containing(range_to_mprotect)) {
        if (!old_region->is_mmap())
            return EPERM;
        if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
            return EINVAL;
        if (old_region->access() == prot_to_region_access_flags(prot))
            return 0;
        if (old_region->vmobject().is_inode()
            && !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(old_region->vmobject()).inode(), old_region->is_shared())) {
            return EACCES;
        }

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but don't deallocate it yet.
        auto region = space().take_region(*old_region);
        VERIFY(region);

        // Unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

        // This vector holds the region(s) adjacent to our range.
        // We need to allocate a new region for the range we wanted to change permission bits on.
        auto adjacent_regions = space().split_region_around_range(*region, range_to_mprotect);

        size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_mprotect.base().get() - region->range().base().get());
        auto& new_region = space().allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
        new_region.set_readable(prot & PROT_READ);
        new_region.set_writable(prot & PROT_WRITE);
        new_region.set_executable(prot & PROT_EXEC);

        // Map the new regions using our page directory (they were just allocated and don't have one).
        for (auto* adjacent_region : adjacent_regions) {
            adjacent_region->map(space().page_directory());
        }
        new_region.map(space().page_directory());
        return 0;
    }

    // FIXME: We should also support mprotect() across multiple regions. (#175) (#964)
    return EINVAL;
}
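
// Advise the kernel about how an mmap() region will be used. Only the
// volatile/purgeable advice bits are implemented, and only for anonymous
// memory: a volatile region's pages may be reclaimed under memory
// pressure, and marking it non-volatile again reports (via a return value
// of 1) whether such a purge actually happened.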
KResultOr<int> Process::sys$madvise(Userspace<void*> address, size_t size, int advice)
{
    REQUIRE_PROMISE(stdio);

    auto range_or_error = expand_range_to_page_boundaries(address, size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range_to_madvise = range_or_error.value();
    if (!range_to_madvise.size())
        return EINVAL;

    if (!is_user_range(range_to_madvise))
        return EFAULT;

    auto* region = space().find_region_from_range(range_to_madvise);
    if (!region)
        return EINVAL;
    if (!region->is_mmap())
        return EPERM;

    bool set_volatile = advice & MADV_SET_VOLATILE;
    bool set_nonvolatile = advice & MADV_SET_NONVOLATILE;
    if (set_volatile && set_nonvolatile)
        return EINVAL;
    if (set_volatile || set_nonvolatile) {
        if (!region->vmobject().is_anonymous())
            return EPERM;
        bool was_purged = false;
        switch (region->set_volatile(VirtualAddress(address), size, set_volatile, was_purged)) {
        case Region::SetVolatileError::Success:
            break;
        case Region::SetVolatileError::NotPurgeable:
            return EPERM;
        case Region::SetVolatileError::OutOfMemory:
            return ENOMEM;
        }
        if (set_nonvolatile)
            return was_purged ? 1 : 0;
        return 0;
    }
    if (advice & MADV_GET_VOLATILE) {
        if (!region->vmobject().is_anonymous())
            return EPERM;
        return region->is_volatile(VirtualAddress(address), size) ? 0 : 1;
    }
    return EINVAL;
}
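
// Give an mmap() region a human-readable name, mainly for diagnostics
// such as profiling output and memory-map listings.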
KResultOr<int> Process::sys$set_mmap_name(Userspace<const Syscall::SC_set_mmap_name_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_set_mmap_name_params params;
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    if (params.name.length > PATH_MAX)
        return ENAMETOOLONG;

    auto name = copy_string_from_user(params.name);
    if (name.is_null())
        return EFAULT;

    auto range_or_error = expand_range_to_page_boundaries((FlatPtr)params.addr, params.size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range = range_or_error.value();

    auto* region = space().find_region_from_range(range);
    if (!region)
        return EINVAL;
    if (!region->is_mmap())
        return EPERM;

    region->set_name(move(name));
    PerformanceManager::add_mmap_perf_event(*this, *region);
    return 0;
}
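
// Unmap a range of memory. Three cases, from cheapest to most expensive:
// the range exactly matches a whole region; the range lies inside a single
// region, which we split around it; or the range intersects several
// regions, each of which is deallocated or split as needed.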
KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
{
    REQUIRE_PROMISE(stdio);

    if (!size)
        return EINVAL;

    auto range_or_error = expand_range_to_page_boundaries(addr, size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range_to_unmap = range_or_error.value();

    if (!is_user_range(range_to_unmap))
        return EFAULT;

    if (auto* whole_region = space().find_region_from_range(range_to_unmap)) {
        if (!whole_region->is_mmap())
            return EPERM;

        PerformanceManager::add_unmap_perf_event(*this, whole_region->range());

        bool success = space().deallocate_region(*whole_region);
        VERIFY(success);
        return 0;
    }

    if (auto* old_region = space().find_region_containing(range_to_unmap)) {
        if (!old_region->is_mmap())
            return EPERM;

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but don't deallocate it yet.
        auto region = space().take_region(*old_region);
        VERIFY(region);

        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

        auto new_regions = space().split_region_around_range(*region, range_to_unmap);

        // Instead we give back the unwanted VM manually.
        space().page_directory().range_allocator().deallocate(range_to_unmap);

        // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
        for (auto* new_region : new_regions) {
            new_region->map(space().page_directory());
        }

        if (auto* event_buffer = current_perf_events_buffer()) {
            [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, range_to_unmap.base().get(), range_to_unmap.size(), nullptr);
        }
        return 0;
    }

    // Try again, this time checking multiple regions at once.
    // (Slow: without caching.)
    const auto& regions = space().find_regions_intersecting(range_to_unmap);

    // Check that every region is mmap()ed up front, so we don't error out
    // halfway through with only part of the range unmapped.
    for (auto* region : regions) {
        if (!region->is_mmap())
            return EPERM;
    }

    Vector<Region*, 2> new_regions;
    for (auto* old_region : regions) {
        // If the range fully covers the old region, we can simply delete it.
        if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
            bool res = space().deallocate_region(*old_region);
            VERIFY(res);
            continue;
        }

        // Remove the old region from our regions tree, since we're going to add another region
        // with the exact same start address, but don't deallocate it yet.
        auto region = space().take_region(*old_region);
        VERIFY(region);

        // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

        // Otherwise just split the regions and collect them for future mapping.
        if (!new_regions.try_append(space().split_region_around_range(*region, range_to_unmap)))
            return ENOMEM;
    }

    // Instead we give back the unwanted VM manually at the end.
    space().page_directory().range_allocator().deallocate(range_to_unmap);

    // And finally we map the new region(s) using our page directory (they were just allocated and don't have one).
    for (auto* new_region : new_regions) {
        new_region->map(space().page_directory());
    }

    PerformanceManager::add_unmap_perf_event(*this, range_to_unmap);
    return 0;
}
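
// Remap an existing mapping. Only one transformation is implemented so
// far: converting a shared file-backed mapping into a private one in
// place, by replacing the shared inode VMObject with a private one backed
// by the same inode over the same virtual address range.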
KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_mremap_params params {};
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    auto range_or_error = expand_range_to_page_boundaries((FlatPtr)params.old_address, params.old_size);
    if (range_or_error.is_error())
        return range_or_error.error().error();

    auto old_range = range_or_error.value();

    auto* old_region = space().find_region_from_range(old_range);
    if (!old_region)
        return EINVAL;

    if (!old_region->is_mmap())
        return EPERM;

    if (old_region->vmobject().is_shared_inode() && params.flags & MAP_PRIVATE && !(params.flags & (MAP_ANONYMOUS | MAP_NORESERVE))) {
        auto range = old_region->range();
        auto old_name = old_region->name();
        auto old_prot = region_access_flags_to_prot(old_region->access());
        auto old_offset = old_region->offset_in_vmobject();
        NonnullRefPtr inode = static_cast<SharedInodeVMObject&>(old_region->vmobject()).inode();

        // Unmap without deallocating the VM range since we're going to reuse it.
        old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
        bool success = space().deallocate_region(*old_region);
        VERIFY(success);

        auto new_vmobject = PrivateInodeVMObject::create_with_inode(inode);

        auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject, old_offset, old_name, old_prot, false);
        if (new_region_or_error.is_error())
            return new_region_or_error.error().error();
        auto& new_region = *new_region_or_error.value();
        new_region.set_mmap(true);
        return new_region.vaddr().get();
    }

    dbgln("sys$mremap: Unimplemented remap request (flags={})", params.flags);
    return ENOTIMPL;
}
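
// Allocate the master TLS region for this process. This is only valid
// while the process is still single-threaded; the initial TLS image is
// copied in from userspace, and the main thread's thread-specific region
// and TLS segment descriptor are set up to point at it.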
KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data, size_t size)
{
    REQUIRE_PROMISE(stdio);

    if (!size || size % PAGE_SIZE != 0)
        return EINVAL;

    if (!m_master_tls_region.is_null())
        return EEXIST;

    if (thread_count() != 1)
        return EFAULT;

    Thread* main_thread = nullptr;
    for_each_thread([&main_thread](auto& thread) {
        main_thread = &thread;
        return IterationDecision::Break;
    });
    VERIFY(main_thread);

    auto range = space().allocate_range({}, size);
    if (!range.has_value())
        return ENOMEM;

    auto region_or_error = space().allocate_region(range.value(), String("Master TLS"), PROT_READ | PROT_WRITE);
    if (region_or_error.is_error())
        return region_or_error.error().error();

    m_master_tls_region = region_or_error.value()->make_weak_ptr();
    m_master_tls_size = size;
    m_master_tls_alignment = PAGE_SIZE;

    {
        Kernel::SmapDisabler disabler;
        void* fault_at;
        if (!Kernel::safe_memcpy((char*)m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), (char*)initial_data.ptr(), size, fault_at))
            return EFAULT;
    }

    auto tsr_result = main_thread->make_thread_specific_region({});
    if (tsr_result.is_error())
        return EFAULT;

    auto& tls_descriptor = Processor::current().get_gdt_entry(GDT_SELECTOR_TLS);
    tls_descriptor.set_base(main_thread->thread_specific_data());
    tls_descriptor.set_limit(main_thread->thread_specific_region_size());

    return m_master_tls_region.unsafe_ptr()->vaddr().get();
}
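
// Declare which regions are allowed to contain code that makes syscalls.
// Passing a null address locks the set down: from then on, no further
// regions can be marked, and syscalls made from outside the marked
// regions will be rejected.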
KResultOr<int> Process::sys$msyscall(Userspace<void*> address)
{
    if (space().enforces_syscall_regions())
        return EPERM;

    if (!address) {
        space().set_enforces_syscall_regions(true);
        return 0;
    }

    if (!is_user_address(VirtualAddress { address }))
        return EFAULT;

    auto* region = space().find_region_containing(Range { VirtualAddress { address }, 1 });
    if (!region)
        return EINVAL;

    if (!region->is_mmap())
        return EINVAL;

    region->set_syscall_region(true);
    return 0;
}

}