/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/Types.h>
#include <Kernel/Arch/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/StdLib.h>

ErrorOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    bool is_user = Kernel::Memory::is_user_range(user_str.vaddr(), user_str_size);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    // Determine the actual string length (bounded by user_str_size) before allocating.
    ssize_t length = Kernel::safe_strnlen(user_str.unsafe_userspace_ptr(), user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = TRY(Kernel::KString::try_create_uninitialized(length, buffer));

    buffer[length] = '\0';

    if (length == 0)
        return new_string;
    if (!Kernel::safe_memcpy(buffer, user_str.unsafe_userspace_ptr(), (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string;
}
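
// Usage sketch (illustrative, not from the original file): a syscall handler that has a
// userspace string pointer and a length would call the helper above roughly like this.
// "user_path" and "path_length" are hypothetical syscall arguments, not names defined here.
//
//     auto path = TRY(try_copy_kstring_from_user(user_path, path_length));
//     dbgln("sys$open: path='{}'", path->view());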

ErrorOr<Time> copy_time_from_user(timespec const* ts_user)
{
    timespec ts {};
    TRY(copy_from_user(&ts, ts_user, sizeof(timespec)));
    return Time::from_timespec(ts);
}

ErrorOr<Time> copy_time_from_user(timeval const* tv_user)
{
    timeval tv {};
    TRY(copy_from_user(&tv, tv_user, sizeof(timeval)));
    return Time::from_timeval(tv);
}

template<>
ErrorOr<Time> copy_time_from_user<const timeval>(Userspace<timeval const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<const timespec>(Userspace<timespec const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
ErrorOr<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
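
// Usage sketch (illustrative only): the Userspace<T*> specializations above let syscall
// code copy a timespec or timeval without handling raw pointers; "user_timeout" is a
// hypothetical Userspace<timespec const*> argument, not a name from this file.
//
//     auto timeout = TRY(copy_time_from_user(user_timeout));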

Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}
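
// Usage sketch (illustrative only, not from the original file): these relaxed user-space
// atomics are the kind of building blocks a futex-like primitive can use. A hypothetical
// caller setting a flag bit in a userspace word might loop on compare-exchange like this
// ("user_word" and "flag" are assumptions, not names defined in this file):
//
//     auto current = user_atomic_load_relaxed(user_word);
//     if (!current.has_value())
//         return EFAULT; // unaligned or not a userspace address
//     u32 expected = current.value();
//     for (;;) {
//         auto exchanged = user_atomic_compare_exchange_relaxed(user_word, expected, expected | flag);
//         if (!exchanged.has_value())
//             return EFAULT;
//         if (exchanged.value())
//             break; // the store succeeded
//         // On failure, "expected" (passed by reference) is assumed to hold the value
//         // observed in user memory, per the usual compare-exchange contract; retry.
//     }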

ErrorOr<void> copy_to_user(void* dest_ptr, void const* src_ptr, size_t n)
{
    if (!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n))
        return EFAULT;
    // The source buffer must be in kernel memory.
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return EFAULT;
    }
    return {};
}

ErrorOr<void> copy_from_user(void* dest_ptr, void const* src_ptr, size_t n)
{
    if (!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n))
        return EFAULT;
    // The destination buffer must be in kernel memory.
    VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return EFAULT;
    }
    return {};
}
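
// Usage sketch (illustrative only): syscall code normally reaches these raw helpers
// through typed Userspace<T*> overloads (assumed to be declared in StdLib.h) so that the
// byte count cannot be mismatched. "Syscall::SC_example_params" and "user_params" are
// hypothetical names used only for this sketch:
//
//     Syscall::SC_example_params params {};
//     TRY(copy_from_user(&params, user_params)); // user -> kernel
//     params.result = 0;
//     TRY(copy_to_user(user_params, &params));   // kernel -> user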

ErrorOr<void> memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return EFAULT;
    }
    return {};
}

#if defined(__clang__) && defined(ENABLE_KERNEL_LTO)
// Due to a chicken-and-egg situation, certain linker-defined symbols that are added on-demand (like the GOT)
// need to be present before LTO bitcode files are compiled. And since we don't link to any native object files,
// the linker does not know that _GLOBAL_OFFSET_TABLE_ is needed, so it doesn't define it, so linking as a PIE fails.
// See https://bugs.llvm.org/show_bug.cgi?id=39634
FlatPtr missing_got_workaround()
{
    extern volatile FlatPtr _GLOBAL_OFFSET_TABLE_;
    return _GLOBAL_OFFSET_TABLE_;
}
#endif

extern "C" {

const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}

char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;
    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}

// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();

[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}