StdLib.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/StdLib.h>
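
// Copies a NUL-terminated string from userspace into a freshly allocated KString.
// The source range is validated as user memory, measured with safe_strnlen() (so a
// faulting pointer is reported instead of crashing the kernel), and then copied with
// safe_memcpy() while SMAP is temporarily disabled.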
Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str.unsafe_userspace_ptr(), user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = Kernel::KString::try_create_uninitialized(length, buffer);
    if (!new_string)
        return ENOMEM;
    buffer[length] = '\0';
    if (length == 0)
        return new_string.release_nonnull();
    if (!Kernel::safe_memcpy(buffer, user_str.unsafe_userspace_ptr(), (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string.release_nonnull();
}
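
// Copies a timespec/timeval from userspace and converts it to a Time value.
// Returns an empty Optional if the copy fails.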
[[nodiscard]] Optional<Time> copy_time_from_user(const timespec* ts_user)
{
    timespec ts;
    if (!copy_from_user(&ts, ts_user, sizeof(timespec))) {
        return {};
    }
    return Time::from_timespec(ts);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timeval* tv_user)
{
    timeval tv;
    if (!copy_from_user(&tv, tv_user, sizeof(timeval))) {
        return {};
    }
    return Time::from_timeval(tv);
}

template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timeval>(Userspace<const timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timespec>(Userspace<const timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
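
// Relaxed atomic operations on userspace memory. Each helper rejects misaligned
// or non-user pointers, then performs the access under a SmapDisabler via the
// matching safe_atomic_* primitive, which yields an empty Optional (or false for
// the store) if the access faults.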
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}
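
// Note: 'expected' must refer to kernel memory; only 'var' may point into
// userspace. The VERIFY below enforces this.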
Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}
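
// The following functions have C linkage. copy_to_user() and copy_from_user()
// require the user-side pointer to be a valid userspace range and VERIFY that
// the kernel-side pointer is not; the copy itself runs under a SmapDisabler via
// safe_memcpy(), and the faulting address is logged on failure.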
extern "C" {

bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}
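
// A minimal usage sketch (hypothetical caller, not part of this file): a syscall
// handler copies a userspace struct into a kernel-local variable before touching
// it, and bails out with EFAULT on failure:
//
//     timespec ts;
//     if (!copy_from_user(&ts, user_ts, sizeof(ts)))
//         return EFAULT;
//
// Here user_ts is an assumed userspace pointer supplied by the caller.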

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}
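
// Fills a userspace range via safe_memset() under a SmapDisabler; returns false
// if the range is not user memory or the write faults.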
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}
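
// Freestanding implementations of the usual libc string/memory routines for
// kernel use; unlike the helpers above, they perform no userspace validation.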
size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}
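
// Classic nested-loop strstr: scan the haystack for the needle's first character,
// then confirm the remainder of the needle with strncmp().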
char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;

    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}