StdLib.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>

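// Copying to and from user memory follows one pattern throughout this file:
// verify that the whole range lies in userspace, temporarily lift SMAP
// protection with Kernel::SmapDisabler, then perform the access through the
// safe_* helpers, which recover from page faults and report the faulting
// address instead of taking the kernel down.
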
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    if (length == 0)
        return String::empty();
    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}

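// Unlike copy_string_from_user(), the KString variant reports failure as an
// error code (EFAULT or ENOMEM) through KResultOr and always writes a null
// terminator into the freshly allocated buffer.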
Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = Kernel::KString::try_create_uninitialized(length, buffer);
    if (!new_string)
        return ENOMEM;
    buffer[length] = '\0';
    if (length == 0)
        return new_string.release_nonnull();
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string.release_nonnull();
}

Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return try_copy_kstring_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timespec* ts_user)
{
    timespec ts;
    if (!copy_from_user(&ts, ts_user, sizeof(timespec))) {
        return {};
    }
    return Time::from_timespec(ts);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timeval* tv_user)
{
    timeval tv;
    if (!copy_from_user(&tv, tv_user, sizeof(timeval))) {
        return {};
    }
    return Time::from_timeval(tv);
}

template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timeval>(Userspace<const timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timespec>(Userspace<const timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }

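// The user_atomic_* helpers below operate on a single u32 in user memory.
// They reject pointers that are not 4-byte aligned or that do not lie
// entirely in userspace, returning an empty Optional (or false for the
// store) on failure. user_atomic_compare_exchange_relaxed additionally
// insists that `expected` lives in kernel memory, since it is read and
// written as an ordinary kernel reference.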
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}

  185. extern "C" {
  186. bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
  187. {
  188. bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
  189. if (!is_user)
  190. return false;
  191. VERIFY(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
  192. Kernel::SmapDisabler disabler;
  193. void* fault_at;
  194. if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
  195. VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
  196. dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
  197. return false;
  198. }
  199. return true;
  200. }
  201. bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
  202. {
  203. bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
  204. if (!is_user)
  205. return false;
  206. VERIFY(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
  207. Kernel::SmapDisabler disabler;
  208. void* fault_at;
  209. if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
  210. VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
  211. dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
  212. return false;
  213. }
  214. return true;
  215. }
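// memcpy() copies four bytes at a time with `rep movsl` when both pointers
// are 4-byte aligned and the copy is at least 12 bytes long, then finishes
// any remaining tail with `rep movsb`.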
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}

void* memmove(void* dest, const void* src, size_t n)
{
    if (dest < src)
        return memcpy(dest, src, n);

    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}

[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

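// memset() uses the same trick as memcpy(): explode_byte() replicates the
// fill byte into a full 32-bit word so `rep stosl` can store four bytes per
// iteration, and `rep stosb` handles the remaining tail.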
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = explode_byte((u8)c);
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}

size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}

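// strstr() scans the haystack for the first byte of the needle, then
// confirms a full match with strncmp() against the rest of the needle;
// an empty needle matches the start of the haystack.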
char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;

    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}

void free(void* p)
{
    return kfree(p);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}