StdLib.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>
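
// Copies a NUL-terminated string of at most user_str_size bytes out of userspace.
// The range is validated with is_user_range() first; the length is measured with
// safe_strnlen() and the bytes are copied with safe_memcpy(), both of which report
// the faulting address instead of crashing. Returns a null String on any failure.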
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    if (length == 0)
        return String::empty();

    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}
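
// Copies a timespec/timeval out of userspace and converts it to the kernel's Time
// type. Returns an empty Optional if the copy faults.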
[[nodiscard]] Optional<Time> copy_time_from_user(const timespec* ts_user)
{
    timespec ts;
    if (!copy_from_user(&ts, ts_user, sizeof(timespec))) {
        return {};
    }
    return Time::from_timespec(ts);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timeval* tv_user)
{
    timeval tv;
    if (!copy_from_user(&tv, tv_user, sizeof(timeval))) {
        return {};
    }
    return Time::from_timeval(tv);
}

template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timeval>(Userspace<const timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timespec>(Userspace<const timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
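
// The user_atomic_*() helpers perform a single 32-bit atomic operation on a userspace
// address. Each one rejects misaligned pointers and pointers outside the userspace
// range, then performs the access under a SmapDisabler via the fault-safe
// Kernel::safe_atomic_*() primitives. An empty Optional (or false for the store)
// means the address was invalid or the access faulted.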
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}
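
// Note: 'expected' is an in/out parameter that must live in kernel memory; the
// VERIFY below guards against accidentally passing a userspace reference.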
Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}

extern "C" {
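
// copy_to_user()/copy_from_user() validate that the userspace side of the transfer
// really lies in the userspace address range (and that the kernel side does not),
// then copy with safe_memcpy() under a SmapDisabler. On a fault they log the faulting
// address and return false instead of panicking.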
bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}
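
// Kernel memcpy: when both pointers are 4-byte aligned and the copy is at least
// 12 bytes, bulk-copy whole 32-bit words with "rep movsl", then finish any
// remaining tail bytes with "rep movsb".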
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}
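
// memmove() handles overlapping buffers: a forward copy (plain memcpy) is safe when
// the destination starts below the source; otherwise copy backwards byte by byte.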
void* memmove(void* dest, const void* src, size_t n)
{
    if (dest < src)
        return memcpy(dest, src, n);

    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}
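
// memset_user() is the fault-safe counterpart of memset() for userspace buffers:
// the range is validated first, and the fill runs through safe_memset() under a
// SmapDisabler so a bad pointer results in 'false' rather than a kernel crash.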
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}
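
// Kernel memset: for 4-byte-aligned destinations of at least 12 bytes, explode_byte()
// replicates the fill byte into each byte of a 32-bit word so "rep stosl" can fill a
// word at a time; any remaining tail bytes are filled with "rep stosb".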
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = explode_byte((u8)c);
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}
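
// Minimal freestanding string routines used by the kernel; they follow the usual
// C library semantics for strlen/strnlen/strcmp/memcmp/strncmp.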
size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}
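
// Classic two-loop strstr(): scan for the first character of the needle, then check
// the rest with strncmp(); an empty needle matches at the start of the haystack.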
char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;

    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}
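
// realloc() and free() simply forward to the kernel heap (Kernel/Heap/kmalloc.h).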
void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}

void free(void* p)
{
    return kfree(p);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail();
[[noreturn]] void __stack_chk_fail_local();
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}