StdLib.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>

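// Copies a NUL-terminated string of at most user_str_size bytes out of userspace.
// The source range is validated up front, and any fault during the copy is caught
// by the safe_* helpers, yielding a null String instead of a kernel crash.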
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    if (length == 0)
        return String::empty();
    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}

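// Copies a timespec/timeval out of userspace and converts it to the kernel's Time type.
// Returns an empty Optional if the copy fails.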
[[nodiscard]] Optional<Time> copy_time_from_user(const timespec* ts_user)
{
    timespec ts;
    if (!copy_from_user(&ts, ts_user, sizeof(timespec))) {
        return {};
    }
    return Time::from_timespec(ts);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timeval* tv_user)
{
    timeval tv;
    if (!copy_from_user(&tv, tv_user, sizeof(timeval))) {
        return {};
    }
    return Time::from_timeval(tv);
}

template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timeval>(Userspace<const timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timespec>(Userspace<const timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }

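// Atomic operations on user memory. Each helper verifies that the pointer is
// 4-byte aligned and lies within the user address space, then performs the access
// with SMAP temporarily disabled; faults are reported through the return value.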
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}

extern "C" {

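// Copies n bytes from kernel memory into a validated userspace destination.
// Returns false if the destination is not a user range or a fault occurs mid-copy.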
bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

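// Copies n bytes from a validated userspace source into kernel memory.
// Returns false if the source is not a user range or a fault occurs mid-copy.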
bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

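// Kernel memcpy: when both pointers are 4-byte aligned and the copy is large enough,
// bulk-copy 32-bit words with "rep movsl", then finish any remaining bytes with "rep movsb".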
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}

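// Overlap-safe copy: copy forwards via memcpy when dest precedes src,
// otherwise copy backwards byte by byte.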
void* memmove(void* dest, const void* src, size_t n)
{
    if (dest < src)
        return memcpy(dest, src, n);
    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}

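// Fills n bytes of a validated userspace buffer with c, catching any fault
// via safe_memset instead of taking an unhandled kernel page fault.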
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

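// Kernel memset: for 4-byte aligned destinations, broadcast the fill byte into a
// word with explode_byte() and store with "rep stosl", then finish with "rep stosb".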
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = explode_byte((u8)c);
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}

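// Freestanding implementations of the standard C string routines used by the kernel.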
size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}

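// Classic strstr: scan for the first character of the needle, then confirm the
// remainder with strncmp; returns nullptr if the haystack is exhausted first.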
char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;
    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

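// realloc() and free() forward to the kernel heap allocator.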
void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}

void free(void* p)
{
    return kfree(p);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}