/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>
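
// Copies a NUL-terminated string of at most user_str_size bytes out of userspace
// into a kernel-owned String. Returns a null String if the pointer is not a
// valid user range or if the copy faults partway through.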
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        klog() << "copy_string_from_user(" << user_str << ", " << user_str_size << ") failed at " << VirtualAddress(fault_at) << " (strnlen)";
        return {};
    }
    if (length == 0)
        return String::empty();

    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        klog() << "copy_string_from_user(" << user_str << ", " << user_str_size << ") failed at " << VirtualAddress(fault_at) << " (memcpy)";
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}

extern "C" {
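
// Copies n bytes from a kernel source into a userspace destination. The
// destination must lie entirely within the user address range (and the source
// must not); on a fault, the faulting address is logged and false is returned
// so the caller can handle the error. A typical call site might look like this
// (illustrative sketch, not from this file):
//
//     if (!copy_to_user(user_buffer, &result, sizeof(result)))
//         return -EFAULT;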
bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return false;
    ASSERT(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        ASSERT(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        klog() << "copy_to_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
        return false;
    }
    return true;
}
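
// The mirror image of copy_to_user(): copies n bytes from a userspace source
// into a kernel destination, with the same range validation and fault handling.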
bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return false;
    ASSERT(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        ASSERT(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        klog() << "copy_from_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
        return false;
    }
    return true;
}
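
// Kernel memcpy. When source and destination are both 4-byte aligned and the
// copy is at least 12 bytes, the bulk is moved one 32-bit word at a time with
// "rep movsl"; the remainder (and all unaligned copies) go byte by byte with
// "rep movsb".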
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}
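
// memmove() tolerates overlapping ranges: copying to a lower address is safe
// front-to-back via memcpy(), while copying to a higher address must run
// backwards so the source bytes are read before they are overwritten.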
void* memmove(void* dest, const void* src, size_t n)
{
    if (dest < src)
        return memcpy(dest, src, n);

    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}
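
// memmem() simply forwards to the shared AK implementation.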
const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}
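
// Fills n bytes of a userspace buffer with the byte value c, with the same
// user-range validation and fault reporting as the copy helpers above.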
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    ASSERT(is_user); // For now assert to catch bugs, but technically not an error
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        klog() << "memset_user(" << dest_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
        return false;
    }
    return true;
}
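
// Kernel memset. For a 4-byte-aligned destination of at least 12 bytes, the
// fill byte is broadcast into all four bytes of a 32-bit word so the bulk can
// be stored with "rep stosl"; the tail (and unaligned buffers) are filled byte
// by byte with "rep stosb".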
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = (u8)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}
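
// Byte-at-a-time implementations of the standard C string and memory
// comparison routines.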
size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}
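
// Classic two-loop strstr(): scan for the needle's first character, then check
// the rest with strncmp(). An empty needle matches at the start of the
// haystack.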
char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;

    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}

void free(void* p)
{
    return kfree(p);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
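// They back the stack protector (__stack_chk_fail and __stack_chk_fail_local),
// static destructor registration (__cxa_atexit), and the pure virtual call trap
// (__cxa_pure_virtual); none of them should ever actually run in the kernel,
// so each one simply asserts.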
[[noreturn]] void __stack_chk_fail();
[[noreturn]] void __stack_chk_fail_local();
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    ASSERT_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    ASSERT_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    ASSERT_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    ASSERT_NOT_REACHED();
}
}