SafeMem.cpp

/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/x86/RegisterState.h>
#include <Kernel/Arch/x86/SafeMem.h>

#define CODE_SECTION(section_name) __attribute__((section(section_name)))

extern "C" u8* start_of_safemem_text;
extern "C" u8* end_of_safemem_text;

extern "C" u8* safe_memcpy_ins_1;
extern "C" u8* safe_memcpy_1_faulted;
extern "C" u8* safe_memcpy_ins_2;
extern "C" u8* safe_memcpy_2_faulted;

extern "C" u8* safe_strnlen_ins;
extern "C" u8* safe_strnlen_faulted;

extern "C" u8* safe_memset_ins_1;
extern "C" u8* safe_memset_1_faulted;
extern "C" u8* safe_memset_ins_2;
extern "C" u8* safe_memset_2_faulted;

extern "C" u8* start_of_safemem_atomic_text;
extern "C" u8* end_of_safemem_atomic_text;

extern "C" u8* safe_atomic_fetch_add_relaxed_ins;
extern "C" u8* safe_atomic_fetch_add_relaxed_faulted;
extern "C" u8* safe_atomic_exchange_relaxed_ins;
extern "C" u8* safe_atomic_exchange_relaxed_faulted;
extern "C" u8* safe_atomic_load_relaxed_ins;
extern "C" u8* safe_atomic_load_relaxed_faulted;
extern "C" u8* safe_atomic_store_relaxed_ins;
extern "C" u8* safe_atomic_store_relaxed_faulted;
extern "C" u8* safe_atomic_compare_exchange_relaxed_ins;
extern "C" u8* safe_atomic_compare_exchange_relaxed_faulted;

namespace Kernel {

CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
{
    fault_at = nullptr;
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    size_t remainder;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            "safe_memcpy_ins_1: \n"
#if ARCH(I386)
            "rep movsl \n"
#else
            "rep movsq \n"
#endif
            "safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
            : "=S"(src),
              "=D"(dest),
              "=c"(remainder),
              [fault_at] "=d"(fault_at)
            : "S"(src),
              "D"(dest),
              "c"(size_ts)
            : "memory");
        if (remainder != 0)
            return false; // fault_at is already set!
        n -= size_ts * sizeof(size_t);
        if (n == 0) {
            fault_at = nullptr;
            return true;
        }
    }
    asm volatile(
        "safe_memcpy_ins_2: \n"
        "rep movsb \n"
        "safe_memcpy_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        : "=c"(remainder),
          [fault_at] "=d"(fault_at)
        : "S"(src),
          "D"(dest),
          "c"(n)
        : "memory");
    if (remainder != 0)
        return false; // fault_at is already set!
    fault_at = nullptr;
    return true;
}
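
// Usage sketch (hypothetical caller, not part of this file): safe_memcpy()
// returns false if the copy faulted, and fault_at then holds the faulting
// address. A caller copying from possibly-unmapped userspace memory might
// look roughly like this:
//
//     void* fault_at;
//     if (!safe_memcpy(kernel_buffer, user_ptr, size, fault_at))
//         return EFAULT; // fault_at points at the address that faulted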

CODE_SECTION(".text.safemem")
NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
{
    ssize_t count = 0;
    fault_at = nullptr;
    asm volatile(
        "1: \n"
        "test %[max_n], %[max_n] \n"
        "je 2f \n"
        "dec %[max_n] \n"
        "safe_strnlen_ins: \n"
        "cmpb $0,(%[str], %[count], 1) \n"
        "je 2f \n"
        "inc %[count] \n"
        "jmp 1b \n"
        "safe_strnlen_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        "xor %[count_on_error], %[count_on_error] \n"
        "dec %[count_on_error] \n" // return -1 on fault
        "2:"
        : [count_on_error] "=c"(count),
          [fault_at] "=d"(fault_at)
        : [str] "b"(str),
          [count] "c"(count),
          [max_n] "d"(max_n));
    if (count >= 0)
        fault_at = nullptr;
    return count;
}
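
// Usage sketch (hypothetical caller, not part of this file): safe_strnlen()
// returns -1 if the byte scan faulted (fault_at is then set), otherwise the
// string length, scanning at most max_n bytes:
//
//     void* fault_at;
//     ssize_t length = safe_strnlen(user_string, max_length, fault_at);
//     if (length < 0)
//         return EFAULT; // faulted at fault_at before finding a NUL byte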

CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
    fault_at = nullptr;
    size_t dest = (size_t)dest_ptr;
    size_t remainder;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = (u8)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        asm volatile(
            "safe_memset_ins_1: \n"
#if ARCH(I386)
            "rep stosl \n"
#else
            "rep stosq \n"
#endif
            "safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
            : "=D"(dest),
              "=c"(remainder),
              [fault_at] "=d"(fault_at)
            : "D"(dest),
              "a"(expanded_c),
              "c"(size_ts)
            : "memory");
        if (remainder != 0)
            return false; // fault_at is already set!
        n -= size_ts * sizeof(size_t);
        if (n == 0) {
            fault_at = nullptr;
            return true;
        }
    }
    asm volatile(
        "safe_memset_ins_2: \n"
        "rep stosb \n"
        "safe_memset_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        : "=D"(dest),
          "=c"(remainder),
          [fault_at] "=d"(fault_at)
        : "D"(dest),
          "c"(n),
          "a"(c)
        : "memory");
    if (remainder != 0)
        return false; // fault_at is already set!
    fault_at = nullptr;
    return true;
}
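
// Usage sketch (hypothetical caller, not part of this file): like safe_memcpy(),
// safe_memset() reports a fault by returning false and setting fault_at:
//
//     void* fault_at;
//     if (!safe_memset(user_buffer, 0, size, fault_at))
//         return EFAULT; // zeroing stopped at fault_at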

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        "safe_atomic_fetch_add_relaxed_ins: \n"
        "lock xadd %[result], %[var] \n"
        "safe_atomic_fetch_add_relaxed_faulted: \n"
        : [error] "=d"(error), [result] "=a"(result), [var] "=m"(*var)
        : [val] "a"(val)
        : "memory");
    if (error)
        return {};
    return result;
}
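
// Usage sketch (hypothetical caller, not part of this file): the atomic helpers
// return an empty Optional when the access faults, so callers distinguish
// "faulted" from any successfully returned value:
//
//     auto previous = safe_atomic_fetch_add_relaxed(user_counter, 1);
//     if (!previous.has_value())
//         return EFAULT; // *user_counter was not accessible
//     // previous.value() is the value *user_counter held before the add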

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        "safe_atomic_exchange_relaxed_ins: \n"
        "xchg %[val], %[var] \n"
        "safe_atomic_exchange_relaxed_faulted: \n"
        : [error] "=d"(error), "=a"(result), [var] "=m"(*var)
        : [val] "a"(val)
        : "memory");
    if (error)
        return {};
    return result;
}

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        "safe_atomic_load_relaxed_ins: \n"
        "mov (%[var]), %[result] \n"
        "safe_atomic_load_relaxed_faulted: \n"
        : [error] "=d"(error), [result] "=c"(result)
        : [var] "b"(var)
        : "memory");
    if (error)
        return {};
    return result;
}

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
{
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        "safe_atomic_store_relaxed_ins: \n"
        "xchg %[val], %[var] \n"
        "safe_atomic_store_relaxed_faulted: \n"
        : [error] "=d"(error), [var] "=m"(*var)
        : [val] "r"(val)
        : "memory");
    return !error;
}

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    // NOTE: accessing expected is NOT protected as it should always point
    // to a valid location in kernel memory!
    bool error;
    bool did_exchange;
    asm volatile(
        "xor %[error], %[error] \n"
        "safe_atomic_compare_exchange_relaxed_ins: \n"
        "lock cmpxchg %[val], %[var] \n"
        "safe_atomic_compare_exchange_relaxed_faulted: \n"
        : [error] "=d"(error), "=a"(expected), [var] "=m"(*var), "=@ccz"(did_exchange)
        : "a"(expected), [val] "b"(val)
        : "memory");
    if (error)
        return {};
    return did_exchange;
}
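
// Usage sketch (hypothetical caller, not part of this file): a typical
// compare-exchange retry loop. On success the Optional holds whether the
// exchange happened; on a fault it is empty. Note that on a failed exchange,
// `expected` is updated to the current value of *var (cmpxchg leaves it in eax):
//
//     u32 expected = 0;
//     for (;;) {
//         auto result = safe_atomic_compare_exchange_relaxed(user_word, expected, expected + 1);
//         if (!result.has_value())
//             return EFAULT;  // *user_word was not accessible
//         if (result.value())
//             break;          // exchanged: *user_word went from expected to expected + 1
//         // otherwise expected now holds the current value of *user_word; retry
//     }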

bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
{
    FlatPtr ip = regs.ip();

    if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
        // If we detect that the fault happened in safe_memcpy(), safe_strnlen(),
        // or safe_memset(), then resume at the appropriate _faulted label.
        if (ip == (FlatPtr)&safe_memcpy_ins_1)
            ip = (FlatPtr)&safe_memcpy_1_faulted;
        else if (ip == (FlatPtr)&safe_memcpy_ins_2)
            ip = (FlatPtr)&safe_memcpy_2_faulted;
        else if (ip == (FlatPtr)&safe_strnlen_ins)
            ip = (FlatPtr)&safe_strnlen_faulted;
        else if (ip == (FlatPtr)&safe_memset_ins_1)
            ip = (FlatPtr)&safe_memset_1_faulted;
        else if (ip == (FlatPtr)&safe_memset_ins_2)
            ip = (FlatPtr)&safe_memset_2_faulted;
        else
            return false;

#if ARCH(I386)
        regs.eip = ip;
        regs.edx = fault_address;
#else
        regs.rip = ip;
        regs.rdx = fault_address;
#endif
        return true;
    }

    if (ip >= (FlatPtr)&start_of_safemem_atomic_text && ip < (FlatPtr)&end_of_safemem_atomic_text) {
        // If we detect that a fault happened in one of the atomic safe_
        // functions, resume at the appropriate _faulted label and set
        // the edx/rdx register to 1 to indicate an error.
        if (ip == (FlatPtr)&safe_atomic_fetch_add_relaxed_ins)
            ip = (FlatPtr)&safe_atomic_fetch_add_relaxed_faulted;
        else if (ip == (FlatPtr)&safe_atomic_exchange_relaxed_ins)
            ip = (FlatPtr)&safe_atomic_exchange_relaxed_faulted;
        else if (ip == (FlatPtr)&safe_atomic_load_relaxed_ins)
            ip = (FlatPtr)&safe_atomic_load_relaxed_faulted;
        else if (ip == (FlatPtr)&safe_atomic_store_relaxed_ins)
            ip = (FlatPtr)&safe_atomic_store_relaxed_faulted;
        else if (ip == (FlatPtr)&safe_atomic_compare_exchange_relaxed_ins)
            ip = (FlatPtr)&safe_atomic_compare_exchange_relaxed_faulted;
        else
            return false;

#if ARCH(I386)
        regs.eip = ip;
        regs.edx = 1;
#else
        regs.rip = ip;
        regs.rdx = 1;
#endif
        return true;
    }

    return false;
}
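
// How this fits together (informal note on the logic above): the page fault
// handler is expected to call handle_safe_access_fault() for faults taken in
// kernel mode. If the faulting instruction pointer lies inside .text.safemem
// or .text.safemem.atomic, the saved IP is rewritten to the matching *_faulted
// label and either the fault address or an error flag is stashed in edx/rdx,
// so the interrupted safe_* function resumes and reports the fault to its
// caller instead of bringing down the kernel. A call site might look like:
//
//     // Hypothetical check in the page fault handler:
//     if (faulted_in_kernel && handle_safe_access_fault(regs, fault_address))
//         return; // the safe_* helper will observe and report the fault itself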

}