SafeMem.cpp

/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/x86/RegisterState.h>
#include <Kernel/Arch/x86/SafeMem.h>

#define CODE_SECTION(section_name) __attribute__((section(section_name)))

extern "C" u8 start_of_safemem_text[];
extern "C" u8 end_of_safemem_text[];

extern "C" u8 safe_memcpy_ins_1[];
extern "C" u8 safe_memcpy_1_faulted[];
extern "C" u8 safe_memcpy_ins_2[];
extern "C" u8 safe_memcpy_2_faulted[];
extern "C" u8 safe_strnlen_ins[];
extern "C" u8 safe_strnlen_faulted[];
extern "C" u8 safe_memset_ins_1[];
extern "C" u8 safe_memset_1_faulted[];
extern "C" u8 safe_memset_ins_2[];
extern "C" u8 safe_memset_2_faulted[];

extern "C" u8 start_of_safemem_atomic_text[];
extern "C" u8 end_of_safemem_atomic_text[];

extern "C" u8 safe_atomic_fetch_add_relaxed_ins[];
extern "C" u8 safe_atomic_fetch_add_relaxed_faulted[];
extern "C" u8 safe_atomic_exchange_relaxed_ins[];
extern "C" u8 safe_atomic_exchange_relaxed_faulted[];
extern "C" u8 safe_atomic_load_relaxed_ins[];
extern "C" u8 safe_atomic_load_relaxed_faulted[];
extern "C" u8 safe_atomic_store_relaxed_ins[];
extern "C" u8 safe_atomic_store_relaxed_faulted[];
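
// Each safe_*_ins label above marks the single instruction that may fault while touching
// possibly-unmapped memory, and the matching *_faulted label is where execution resumes
// after handle_safe_access_fault() patches the saved instruction pointer. The
// start_of_*/end_of_* symbols bound the .text.safemem and .text.safemem.atomic sections
// (assumed to be provided by the kernel's linker script), so the fault handler can cheaply
// test whether a fault originated in this file.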

namespace Kernel {

CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
{
    fault_at = nullptr;
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    size_t remainder;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        asm volatile(
            ".globl safe_memcpy_ins_1 \n"
            "safe_memcpy_ins_1: \n"
#if ARCH(I386)
            "rep movsl \n"
#else
            "rep movsq \n"
#endif
            ".globl safe_memcpy_1_faulted \n"
            "safe_memcpy_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
            : "=S"(src),
            "=D"(dest),
            "=c"(remainder),
            [fault_at] "=d"(fault_at)
            : "S"(src),
            "D"(dest),
            "c"(size_ts)
            : "memory");
        if (remainder != 0)
            return false; // fault_at is already set!
        n -= size_ts * sizeof(size_t);
        if (n == 0) {
            fault_at = nullptr;
            return true;
        }
    }
    asm volatile(
        ".globl safe_memcpy_ins_2 \n"
        "safe_memcpy_ins_2: \n"
        "rep movsb \n"
        ".globl safe_memcpy_2_faulted \n"
        "safe_memcpy_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        : "=c"(remainder),
        [fault_at] "=d"(fault_at)
        : "S"(src),
        "D"(dest),
        "c"(n)
        : "memory");
    if (remainder != 0)
        return false; // fault_at is already set!
    fault_at = nullptr;
    return true;
}
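
// Usage sketch (hypothetical caller, not part of this file): copy from a pointer that
// may reference unmapped user memory and report where the copy stopped on failure.
//
//     void* fault_at;
//     if (!safe_memcpy(kernel_buffer, user_pointer, size, fault_at))
//         dbgln("safe_memcpy faulted at {}", fault_at);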

CODE_SECTION(".text.safemem")
NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
{
    ssize_t count = 0;
    fault_at = nullptr;
    asm volatile(
        "1: \n"
        "test %[max_n], %[max_n] \n"
        "je 2f \n"
        "dec %[max_n] \n"
        ".globl safe_strnlen_ins \n"
        "safe_strnlen_ins: \n"
        "cmpb $0,(%[str], %[count], 1) \n"
        "je 2f \n"
        "inc %[count] \n"
        "jmp 1b \n"
        ".globl safe_strnlen_faulted \n"
        "safe_strnlen_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        "xor %[count_on_error], %[count_on_error] \n"
        "dec %[count_on_error] \n" // return -1 on fault
        "2:"
        : [count_on_error] "=c"(count),
        [fault_at] "=d"(fault_at)
        : [str] "b"(str),
        [count] "c"(count),
        [max_n] "d"(max_n));
    if (count >= 0)
        fault_at = nullptr;
    return count;
}
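
// Usage sketch (hypothetical caller, not part of this file): measure a user-supplied
// string before copying it, treating a negative return value as an invalid pointer.
//
//     void* fault_at;
//     ssize_t length = safe_strnlen(user_string, max_length, fault_at);
//     if (length < 0)
//         return EFAULT; // fault_at holds the address that faulted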

CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
    fault_at = nullptr;
    size_t dest = (size_t)dest_ptr;
    size_t remainder;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = (u8)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        asm volatile(
            ".globl safe_memset_ins_1 \n"
            "safe_memset_ins_1: \n"
#if ARCH(I386)
            "rep stosl \n"
#else
            "rep stosq \n"
#endif
            ".globl safe_memset_1_faulted \n"
            "safe_memset_1_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
            : "=D"(dest),
            "=c"(remainder),
            [fault_at] "=d"(fault_at)
            : "D"(dest),
            "a"(expanded_c),
            "c"(size_ts)
            : "memory");
        if (remainder != 0)
            return false; // fault_at is already set!
        n -= size_ts * sizeof(size_t);
        if (n == 0) {
            fault_at = nullptr;
            return true;
        }
    }
    asm volatile(
        ".globl safe_memset_ins_2 \n"
        "safe_memset_ins_2: \n"
        "rep stosb \n"
        ".globl safe_memset_2_faulted \n"
        "safe_memset_2_faulted: \n" // handle_safe_access_fault() set edx/rdx to the fault address!
        : "=D"(dest),
        "=c"(remainder),
        [fault_at] "=d"(fault_at)
        : "D"(dest),
        "c"(n),
        "a"(c)
        : "memory");
    if (remainder != 0)
        return false; // fault_at is already set!
    fault_at = nullptr;
    return true;
}
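
// Usage sketch (hypothetical caller, not part of this file): zero a user buffer and
// fall back to an error if any part of it is unmapped.
//
//     void* fault_at;
//     if (!safe_memset(user_buffer, 0, size, fault_at))
//         return EFAULT;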

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_fetch_add_relaxed_ins \n"
        "safe_atomic_fetch_add_relaxed_ins: \n"
        "lock xadd %[result], %[var] \n"
        ".globl safe_atomic_fetch_add_relaxed_faulted \n"
        "safe_atomic_fetch_add_relaxed_faulted: \n"
        : [error] "=d"(error), [result] "=a"(result), [var] "=m"(*var)
        : [val] "a"(val)
        : "memory");
    if (error)
        return {};
    return result;
}
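
// Usage sketch (hypothetical caller, not part of this file): atomically bump a counter
// that lives in user-controlled memory without risking a crash on a bad pointer.
//
//     auto old_value = safe_atomic_fetch_add_relaxed(user_counter, 1);
//     if (!old_value.has_value())
//         return EFAULT; // the increment faulted; nothing was written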

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_exchange_relaxed_ins \n"
        "safe_atomic_exchange_relaxed_ins: \n"
        "xchg %[val], %[var] \n"
        ".globl safe_atomic_exchange_relaxed_faulted \n"
        "safe_atomic_exchange_relaxed_faulted: \n"
        : [error] "=d"(error), "=a"(result), [var] "=m"(*var)
        : [val] "a"(val)
        : "memory");
    if (error)
        return {};
    return result;
}

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
{
    u32 result;
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_load_relaxed_ins \n"
        "safe_atomic_load_relaxed_ins: \n"
        "mov (%[var]), %[result] \n"
        ".globl safe_atomic_load_relaxed_faulted \n"
        "safe_atomic_load_relaxed_faulted: \n"
        : [error] "=d"(error), [result] "=c"(result)
        : [var] "b"(var)
        : "memory");
    if (error)
        return {};
    return result;
}

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
{
    bool error;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_store_relaxed_ins \n"
        "safe_atomic_store_relaxed_ins: \n"
        "xchg %[val], %[var] \n"
        ".globl safe_atomic_store_relaxed_faulted \n"
        "safe_atomic_store_relaxed_faulted: \n"
        : [error] "=d"(error), [var] "=m"(*var)
        : [val] "r"(val)
        : "memory");
    return !error;
}

CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    // NOTE: accessing expected is NOT protected as it should always point
    // to a valid location in kernel memory!
    bool error;
    bool did_exchange;
    asm volatile(
        "xor %[error], %[error] \n"
        ".globl safe_atomic_compare_exchange_relaxed_ins \n"
        "safe_atomic_compare_exchange_relaxed_ins: \n"
        "lock cmpxchg %[val], %[var] \n"
        ".globl safe_atomic_compare_exchange_relaxed_faulted \n"
        "safe_atomic_compare_exchange_relaxed_faulted: \n"
        : [error] "=d"(error), "=a"(expected), [var] "=m"(*var), "=@ccz"(did_exchange)
        : "a"(expected), [val] "b"(val)
        : "memory");
    if (error)
        return {};
    return did_exchange;
}
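
// Usage sketch (hypothetical caller, not part of this file): a compare-and-swap loop on
// a user-space word, bailing out if the address turns out to be unmapped.
//
//     u32 expected = 0;
//     for (;;) {
//         auto result = safe_atomic_compare_exchange_relaxed(user_word, expected, desired);
//         if (!result.has_value())
//             return EFAULT; // the cmpxchg itself faulted
//         if (result.value())
//             break; // exchanged successfully
//         // otherwise `expected` now holds the current value; retry
//     }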

bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
{
    FlatPtr ip = regs.ip();
    if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
        // If we detect that the fault happened in safe_memcpy(), safe_strnlen(),
        // or safe_memset(), then resume at the appropriate _faulted label
        // and set edx/rdx to the fault address.
        if (ip == (FlatPtr)safe_memcpy_ins_1)
            ip = (FlatPtr)safe_memcpy_1_faulted;
        else if (ip == (FlatPtr)safe_memcpy_ins_2)
            ip = (FlatPtr)safe_memcpy_2_faulted;
        else if (ip == (FlatPtr)safe_strnlen_ins)
            ip = (FlatPtr)safe_strnlen_faulted;
        else if (ip == (FlatPtr)safe_memset_ins_1)
            ip = (FlatPtr)safe_memset_1_faulted;
        else if (ip == (FlatPtr)safe_memset_ins_2)
            ip = (FlatPtr)safe_memset_2_faulted;
        else
            return false;
        regs.set_ip(ip);
        regs.set_dx(fault_address);
        return true;
    }
    if (ip >= (FlatPtr)&start_of_safemem_atomic_text && ip < (FlatPtr)&end_of_safemem_atomic_text) {
        // If we detect that a fault happened in one of the atomic safe_
        // functions, resume at the appropriate _faulted label and set
        // the edx/rdx register to 1 to indicate an error.
        if (ip == (FlatPtr)safe_atomic_fetch_add_relaxed_ins)
            ip = (FlatPtr)safe_atomic_fetch_add_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_exchange_relaxed_ins)
            ip = (FlatPtr)safe_atomic_exchange_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_load_relaxed_ins)
            ip = (FlatPtr)safe_atomic_load_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_store_relaxed_ins)
            ip = (FlatPtr)safe_atomic_store_relaxed_faulted;
        else if (ip == (FlatPtr)safe_atomic_compare_exchange_relaxed_ins)
            ip = (FlatPtr)safe_atomic_compare_exchange_relaxed_faulted;
        else
            return false;
        regs.set_ip(ip);
        regs.set_dx(1);
        return true;
    }
    return false;
}
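
// Taken together: handle_safe_access_fault() is intended to be called from the kernel's
// page fault handler when a fault originates in kernel code. If the faulting instruction
// pointer lies inside one of the two safemem text sections, it rewrites the saved
// instruction pointer to the matching *_faulted label and stores either the fault address
// or an error flag (1) in edx/rdx, so the interrupted safe_* function can report failure
// instead of the kernel crashing on the bad access.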

}