StdLib.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/StdLib.h>

String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    if (length == 0)
        return String::empty();
    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}

Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str.unsafe_userspace_ptr(), user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = Kernel::KString::try_create_uninitialized(length, buffer);
    if (!new_string)
        return ENOMEM;
    buffer[length] = '\0';
    if (length == 0)
        return new_string.release_nonnull();
    if (!Kernel::safe_memcpy(buffer, user_str.unsafe_userspace_ptr(), (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string.release_nonnull();
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timespec* ts_user)
{
    timespec ts;
    if (!copy_from_user(&ts, ts_user, sizeof(timespec))) {
        return {};
    }
    return Time::from_timespec(ts);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timeval* tv_user)
{
    timeval tv;
    if (!copy_from_user(&tv, tv_user, sizeof(timeval))) {
        return {};
    }
    return Time::from_timeval(tv);
}

template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timeval>(Userspace<const timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timespec>(Userspace<const timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }

// Atomic helpers that operate on userspace memory. Each one rejects
// misaligned or non-user addresses before disabling SMAP for the access.
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}

extern "C" {

bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}

[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}

char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;
    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}
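
// A minimal usage sketch of the copy helpers above, kept as a comment so it
// does not alter the translation unit. The syscall name, its parameters, and
// the Process::sys$example entry point are hypothetical, invented purely for
// illustration; only try_copy_kstring_from_user() and copy_to_user() come
// from this file.
//
//     KResultOr<FlatPtr> Process::sys$example(Userspace<const char*> user_path, size_t path_length, Userspace<size_t*> out_length)
//     {
//         // Copy the string out of userspace; non-user ranges fail with EFAULT.
//         auto path_or_error = try_copy_kstring_from_user(user_path, path_length);
//         if (path_or_error.is_error())
//             return path_or_error.error();
//         auto path = path_or_error.release_value();
//
//         // Write a result back; copy_to_user() refuses non-user destinations.
//         size_t length = path->view().length();
//         if (!copy_to_user(out_length.unsafe_userspace_ptr(), &length, sizeof(length)))
//             return EFAULT;
//         return 0;
//     }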