/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>
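
// Copies a NUL-terminated string out of userspace into a kernel-owned AK::String.
// The source range is validated with is_user_range() before SMAP is temporarily
// disabled; safe_strnlen() and safe_memcpy() report the faulting address if the
// userspace mapping goes away mid-copy. A null String is returned on failure.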
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    if (length == 0)
        return String::empty();

    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}
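
// Fallible variant that builds a kernel-heap KString instead of an AK::String.
// Failures are reported through KResultOr: EFAULT for a bad userspace range and
// ENOMEM if the KString allocation fails. The buffer is always NUL-terminated
// before being handed back.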
Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = Kernel::KString::try_create_uninitialized(length, buffer);
    if (!new_string)
        return ENOMEM;

    buffer[length] = '\0';

    if (length == 0)
        return new_string.release_nonnull();
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string.release_nonnull();
}

Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return try_copy_kstring_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}
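
// Copies a timespec or timeval out of userspace and converts it to the kernel's
// Time type, returning an empty Optional if the copy faults. The explicit
// template specializations below adapt the Userspace<T*> wrappers to the
// raw-pointer overloads.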
[[nodiscard]] Optional<Time> copy_time_from_user(const timespec* ts_user)
{
    timespec ts;
    if (!copy_from_user(&ts, ts_user, sizeof(timespec))) {
        return {};
    }
    return Time::from_timespec(ts);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timeval* tv_user)
{
    timeval tv;
    if (!copy_from_user(&tv, tv_user, sizeof(timeval))) {
        return {};
    }
    return Time::from_timeval(tv);
}

template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timeval>(Userspace<const timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timespec>(Userspace<const timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
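
// Fault-safe atomic accessors for u32 words living in userspace (for example,
// futex-style operations). Each helper rejects misaligned or non-userspace
// pointers, then performs the access under a SmapDisabler via the
// Kernel::safe_atomic_* primitives, which report a fault by returning an
// empty Optional (or false for the store).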
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}
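
// Everything below has C linkage. copy_to_user() and copy_from_user() verify
// that the userspace side of the transfer really is a userspace range (and
// that the kernel side is not), then copy under a SmapDisabler using the
// fault-safe safe_memcpy(). An illustrative, hypothetical syscall-side caller
// might look like:
//
//     SomeParams params;
//     if (!copy_from_user(&params, user_params, sizeof(params)))
//         return EFAULT;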
extern "C" {

bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}
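
// Kernel memcpy(): when both pointers are 4-byte aligned and the copy is large
// enough, bulk-copy word-sized chunks with "rep movsl" (i386) or "rep movsq"
// (x86_64), then finish any remaining bytes with "rep movsb".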
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
#if ARCH(I386)
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
#else
        asm volatile(
            "rep movsq\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
#endif
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}
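
// memmove() tolerates overlapping ranges: if the destination starts below the
// source a forward memcpy() is safe, otherwise the bytes are copied backwards.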
void* memmove(void* dest, const void* src, size_t n)
{
    if (dest < src)
        return memcpy(dest, src, n);

    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}
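
// memset_user() fills userspace memory through the fault-safe safe_memset();
// the plain memset() below uses the same word-then-byte "rep stos" strategy as
// memcpy(), with explode_byte() replicating the fill byte across a whole word.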
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = explode_byte((u8)c);
#if ARCH(I386)
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
#else
        asm volatile(
            "rep stosq\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
#endif
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}
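
// Freestanding C string routines with the usual standard-library semantics.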
size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}
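
// Classic strstr(): scan for the needle's first character, then confirm the
// rest with strncmp(). An empty needle matches at the start of the haystack.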
char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;

    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}