StdLib.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>
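
// Copies a NUL-terminated string of at most user_str_size bytes out of userspace.
// Returns a null String if the pointer is not a user address or the copy faults,
// and an empty String if the first byte is already NUL.
//
// A minimal usage sketch (hypothetical syscall handler, not part of this file):
//
//     auto path = copy_string_from_user(user_path, path_length);
//     if (path.is_null())
//         return -EFAULT;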
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        klog() << "copy_string_from_user(" << static_cast<const void*>(user_str) << ", " << user_str_size << ") failed at " << VirtualAddress(fault_at) << " (strnlen)";
        return {};
    }
    if (length == 0)
        return String::empty();

    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        klog() << "copy_string_from_user(" << static_cast<const void*>(user_str) << ", " << user_str_size << ") failed at " << VirtualAddress(fault_at) << " (memcpy)";
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}
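
// The user_atomic_*_relaxed() helpers below operate on a single u32 in userspace.
// Each one rejects unaligned or non-user addresses, then performs the access with
// SMAP disabled. An empty Optional (or false from the store) means the address was
// rejected or the access faulted.
//
// A hypothetical caller (not part of this file) reading a userspace word might do:
//
//     auto value = user_atomic_load_relaxed(userspace_word);
//     if (!value.has_value())
//         return -EFAULT;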
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    ASSERT(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}

extern "C" {
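
// copy_to_user()/copy_from_user() verify that the userspace side of the copy lies in
// the user address range (and assert that the kernel side does not), then perform the
// copy with SMAP disabled via safe_memcpy(), which reports the faulting address when
// the access trips a page fault. A false return means the copy did not complete.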
bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    ASSERT(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        ASSERT(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        klog() << "copy_to_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
        return false;
    }
    return true;
}

bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
    if (!is_user)
        return false;
    ASSERT(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        ASSERT(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        klog() << "copy_from_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
        return false;
    }
    return true;
}
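
// memcpy() copies 32-bit words with "rep movsl" when both pointers are 4-byte aligned
// and the copy is at least 12 bytes long, then finishes the remaining tail (or handles
// small/unaligned copies) with a byte-wise "rep movsb".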
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}

void* memmove(void* dest, const void* src, size_t n)
{
    if (dest < src)
        return memcpy(dest, src, n);

    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}
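
// memset_user() is the userspace-checked counterpart of memset(): it validates that
// the destination range lies entirely in user memory and performs the fill with SMAP
// disabled through safe_memset(), logging the faulting address on failure.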
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        klog() << "memset_user(" << dest_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
        return false;
    }
    return true;
}
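
// memset() expands the fill byte into a 32-bit pattern and stores it with "rep stosl"
// when the destination is 4-byte aligned and at least 12 bytes long, then finishes the
// remaining tail (or handles small/unaligned fills) with a byte-wise "rep stosb".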
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = (u8)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}

size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}
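
// strstr() scans for the first character of the needle and then confirms the rest of
// the match with strncmp(); an empty needle matches at the start of the haystack.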
char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;

    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}

void free(void* p)
{
    return kfree(p);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail();
[[noreturn]] void __stack_chk_fail_local();
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    ASSERT_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    ASSERT_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    ASSERT_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    ASSERT_NOT_REACHED();
}
}