StdLib.cpp 4.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233
  1. #include <AK/Assertions.h>
  2. #include <AK/Types.h>
  3. #include <Kernel/kmalloc.h>
  4. extern "C" {
// Copies n bytes from src_ptr to dest_ptr and returns dest_ptr.
// Behavior is undefined for overlapping regions (use memmove instead).
// Fast path: when both pointers are 4-byte aligned and the run is long
// enough, whole 32-bit words are moved with "rep movsl"; any remaining
// tail (or the unaligned case) is finished with "rep movsb".
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        // Number of whole 32-bit words to move in one string operation.
        size_t size_ts = n / sizeof(size_t);
        // rep movsl advances ESI/EDI by 4*ECX; the updated pointers are
        // read back into src/dest for the byte tail below.
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    // Byte-at-a-time copy for the unaligned case and the leftover tail.
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}
  26. void* memmove(void* dest, const void* src, size_t n)
  27. {
  28. if (dest < src)
  29. return memcpy(dest, src, n);
  30. u8* pd = (u8*)dest;
  31. const u8* ps = (const u8*)src;
  32. for (pd += n, ps += n; n--;)
  33. *--pd = *--ps;
  34. return dest;
  35. }
  36. char* strcpy(char* dest, const char* src)
  37. {
  38. auto* dest_ptr = dest;
  39. auto* src_ptr = src;
  40. while ((*dest_ptr++ = *src_ptr++) != '\0')
  41. ;
  42. return dest;
  43. }
  44. char* strncpy(char* dest, const char* src, size_t n)
  45. {
  46. size_t i;
  47. for (i = 0; i < n && src[i] != '\0'; ++i)
  48. dest[i] = src[i];
  49. for (; i < n; ++i)
  50. dest[i] = '\0';
  51. return dest;
  52. }
// Fills n bytes at dest_ptr with the byte value (u8)c and returns
// dest_ptr. Fast path: when dest is 4-byte aligned and the run is long
// enough, the byte is replicated into a 32-bit word and stored with
// "rep stosl"; the tail (or the unaligned case) uses "rep stosb".
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        // Number of whole 32-bit words to store in one string operation.
        size_t size_ts = n / sizeof(size_t);
        // Replicate the low byte of c into all four bytes of a word.
        size_t expanded_c = (u8)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        // rep stosl advances EDI by 4*ECX; the updated address is read
        // back into dest for the byte tail below.
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}
  78. char* strrchr(const char* str, int ch)
  79. {
  80. char* last = nullptr;
  81. char c;
  82. for (; (c = *str); ++str) {
  83. if (c == ch)
  84. last = const_cast<char*>(str);
  85. }
  86. return last;
  87. }
  88. size_t strlen(const char* str)
  89. {
  90. size_t len = 0;
  91. while (*(str++))
  92. ++len;
  93. return len;
  94. }
  95. int strcmp(const char* s1, const char* s2)
  96. {
  97. for (; *s1 == *s2; ++s1, ++s2) {
  98. if (*s1 == 0)
  99. return 0;
  100. }
  101. return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
  102. }
  103. char* strdup(const char* str)
  104. {
  105. size_t len = strlen(str);
  106. char* new_str = (char*)kmalloc(len + 1);
  107. strcpy(new_str, str);
  108. return new_str;
  109. }
  110. int memcmp(const void* v1, const void* v2, size_t n)
  111. {
  112. auto* s1 = (const u8*)v1;
  113. auto* s2 = (const u8*)v2;
  114. while (n-- > 0) {
  115. if (*s1++ != *s2++)
  116. return s1[-1] < s2[-1] ? -1 : 1;
  117. }
  118. return 0;
  119. }
// C++ ABI hook the compiler calls when a pure virtual function is
// invoked (e.g. a virtual call from an abstract class's constructor or
// destructor). Reaching it is always a bug, so it asserts.
[[noreturn]] void __cxa_pure_virtual()
{
    ASSERT_NOT_REACHED();
}
// 64-by-32 unsigned division around the x86 "divl" instruction: divides
// n by d and returns the 32-bit quotient (the remainder is computed in
// EDX but discarded). The caller must guarantee the quotient fits in
// 32 bits — otherwise divl raises a #DE fault.
static inline uint32_t divq(uint64_t n, uint32_t d)
{
    uint32_t n1 = n >> 32; // high half of the dividend, loaded into EDX
    uint32_t n0 = n;       // low half of the dividend, loaded into EAX
    uint32_t q;
    uint32_t r;
    asm volatile("divl %4"
        : "=d"(r), "=a"(q)
        : "0"(n1), "1"(n0), "rm"(d));
    return q;
}
// Full 64-by-64 unsigned division built from 32-bit divl operations
// (for a 32-bit target with no native 64-bit divide).
static uint64_t unsigned_divide64(uint64_t n, uint64_t d)
{
    if ((d >> 32) == 0) {
        // Divisor fits in 32 bits: divide the high word first, then
        // fold its remainder into the low-word division.
        uint64_t b = 1ULL << 32;
        uint32_t n1 = n >> 32;
        uint32_t n0 = n;
        uint32_t d0 = d;
        return divq(b * (n1 % d0) + n0, d0) + b * (n1 / d0);
    }
    if (n < d)
        return 0;
    // Divisor needs more than 32 bits: estimate the quotient from the
    // top 32 bits of the normalized divisor, then apply a one-step
    // correction (the final line picks q or q-1).
    uint32_t d1 = d >> 32u;
    int s = __builtin_clz(d1); // leading zeros of the divisor's high word (d1 != 0 here)
    uint64_t q = divq(n >> 1, (d << s) >> 32) >> (31 - s);
    return n - (q - 1) * d < d ? q - 1 : q;
}
  151. static uint32_t unsigned_modulo64(uint64_t n, uint64_t d)
  152. {
  153. return n - d * unsigned_divide64(n, d);
  154. }
  155. static int64_t signed_divide64(int64_t n, int64_t d)
  156. {
  157. uint64_t n_abs = n >= 0 ? (uint64_t)n : -(uint64_t)n;
  158. uint64_t d_abs = d >= 0 ? (uint64_t)d : -(uint64_t)d;
  159. uint64_t q_abs = unsigned_divide64(n_abs, d_abs);
  160. return (n < 0) == (d < 0) ? (int64_t)q_abs : -(int64_t)q_abs;
  161. }
  162. static int32_t signed_modulo64(int64_t n, int64_t d)
  163. {
  164. return n - d * signed_divide64(n, d);
  165. }
// libgcc runtime helper: the compiler emits calls to __divdi3 for
// signed 64-bit '/' on this 32-bit target.
int64_t __divdi3(int64_t n, int64_t d)
{
    return signed_divide64(n, d);
}
// libgcc runtime helper: the compiler emits calls to __moddi3 for
// signed 64-bit '%' on this 32-bit target.
// NOTE(review): signed_modulo64 is declared to return int32_t, so
// remainders outside the 32-bit range get truncated here — verify.
int64_t __moddi3(int64_t n, int64_t d)
{
    return signed_modulo64(n, d);
}
// libgcc runtime helper: the compiler emits calls to __udivdi3 for
// unsigned 64-bit '/' on this 32-bit target.
uint64_t __udivdi3(uint64_t n, uint64_t d)
{
    return unsigned_divide64(n, d);
}
// libgcc runtime helper: the compiler emits calls to __umoddi3 for
// unsigned 64-bit '%' on this 32-bit target.
// NOTE(review): unsigned_modulo64 is declared to return uint32_t, so
// remainders >= 2^32 get truncated here — verify.
uint64_t __umoddi3(uint64_t n, uint64_t d)
{
    return unsigned_modulo64(n, d);
}
  182. uint64_t __udivmoddi4(uint64_t n, uint64_t d, uint64_t* r)
  183. {
  184. uint64_t q = 0;
  185. uint64_t qbit = 1;
  186. if (!d)
  187. return 1 / ((unsigned)d);
  188. while ((int64_t)d >= 0) {
  189. d <<= 1;
  190. qbit <<= 1;
  191. }
  192. while (qbit) {
  193. if (d <= n) {
  194. n -= d;
  195. q += qbit;
  196. }
  197. d >>= 1;
  198. qbit >>= 1;
  199. }
  200. if (r)
  201. *r = n;
  202. return q;
  203. }
  204. }