// Prekernel/init.cpp
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
  4. * Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
  5. *
  6. * SPDX-License-Identifier: BSD-2-Clause
  7. */
  8. #include <AK/Types.h>
  9. #include <Kernel/Boot/Multiboot.h>
  10. #include <Kernel/Memory/PhysicalAddress.h>
  11. #include <Kernel/Memory/VirtualAddress.h>
  12. #include <Kernel/Prekernel/Prekernel.h>
  13. #include <LibELF/ELFABI.h>
  14. #include <LibELF/Relocation.h>
  15. #if ARCH(X86_64)
  16. # include <Kernel/Arch/x86_64/ASM_wrapper.h>
  17. # include <Kernel/Arch/x86_64/CPUID.h>
  18. #endif
  19. // Defined in the linker script
  20. extern uintptr_t __stack_chk_guard;
  21. uintptr_t __stack_chk_guard __attribute__((used));
  22. extern "C" [[noreturn]] void __stack_chk_fail();
  23. extern "C" u8 start_of_prekernel_image[];
  24. extern "C" u8 end_of_prekernel_image[];
  25. extern "C" u8 _binary_Kernel_standalone_start[];
  26. extern "C" u8 end_of_prekernel_image_after_kernel_image[];
  27. extern "C" u8 gdt64ptr[];
  28. extern "C" u16 code64_sel;
  29. extern "C" u64 boot_pml4t[512];
  30. extern "C" u64 boot_pdpt[512];
  31. extern "C" u64 boot_pd0[512];
  32. extern "C" u64 boot_pd0_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
  33. extern "C" u64 boot_pd_kernel[512];
  34. extern "C" u64 boot_pd_kernel_pt0[512];
  35. extern "C" u64 boot_pd_kernel_image_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
  36. extern "C" u64 boot_pd_kernel_pt1023[512];
  37. extern "C" char const kernel_cmdline[4096];
  38. extern "C" void reload_cr3();
  39. extern "C" {
  40. multiboot_info_t* multiboot_info_ptr;
  41. }
  42. [[noreturn]] static void halt()
  43. {
  44. asm volatile("hlt");
  45. __builtin_unreachable();
  46. }
// Called by compiler-generated stack-protector epilogues when the stack canary
// (__stack_chk_guard) has been clobbered. There is no recovery path in the
// pre-kernel environment, so just halt.
void __stack_chk_fail()
{
    halt();
}
// Backstop for failed assertions (e.g. VERIFY()) in the pre-kernel environment.
// The file/function/line/expression arguments are ignored: there is no console
// set up this early to report them, so the only sensible action is to halt.
void __assertion_failed(char const*, char const*, unsigned int, char const*)
{
    halt();
}
  55. namespace Kernel {
  56. // boot.S expects these functions to exactly have the following signatures.
  57. // We declare them here to ensure their signatures don't accidentally change.
  58. extern "C" [[noreturn]] void init();
  59. // SerenityOS Pre-Kernel Environment C++ entry point :^)
  60. //
  61. // This is where C++ execution begins, after boot.S transfers control here.
  62. //
  63. u64 generate_secure_seed();
  64. static void memmove_virt(void* dest_virt, FlatPtr dest_phys, void* src, size_t n)
  65. {
  66. if (dest_phys < (FlatPtr)src) {
  67. u8* pd = (u8*)dest_virt;
  68. u8 const* ps = (u8 const*)src;
  69. for (; n--;)
  70. *pd++ = *ps++;
  71. return;
  72. }
  73. u8* pd = (u8*)dest_virt;
  74. u8 const* ps = (u8 const*)src;
  75. for (pd += n, ps += n; n--;)
  76. *--pd = *--ps;
  77. }
// SerenityOS Pre-Kernel Environment C++ entry point, called from boot.S.
//
// Chooses a (possibly KASLR-randomized) virtual base for the real kernel,
// builds page tables mapping it there, copies the embedded kernel ELF image
// into place, fills in a BootInfo struct for the kernel, rebases the stack
// into the new mapping, and finally jumps to the kernel's entry point.
// Never returns.
extern "C" [[noreturn]] void init()
{
    // Grab the (optional) first multiboot module; it is handed to the kernel
    // as multiboot_module_physical_ptr/length below.
    u32 initrd_module_start = 0;
    u32 initrd_module_end = 0;
    if (multiboot_info_ptr->mods_count > 0) {
        // We only consider the first specified multiboot module, and ignore
        // the rest of the modules.
        multiboot_module_entry_t* initrd_module = (multiboot_module_entry_t*)(FlatPtr)multiboot_info_ptr->mods_addr;
        // A module whose start lies past its end is malformed; give up early.
        if (initrd_module->start > initrd_module->end)
            halt();
        initrd_module_start = initrd_module->start;
        initrd_module_end = initrd_module->end;
    }

    // The kernel ELF image is linked into this binary as an opaque blob.
    u8* kernel_image = _binary_Kernel_standalone_start;
    // copy the ELF header and program headers because we might end up overwriting them
    Elf_Ehdr kernel_elf_header = *(Elf_Ehdr*)kernel_image;
    Elf_Phdr kernel_program_headers[16];
    if (kernel_elf_header.e_phnum > array_size(kernel_program_headers))
        halt(); // more program headers than our fixed-size copy can hold
    __builtin_memcpy(kernel_program_headers, kernel_image + kernel_elf_header.e_phoff, sizeof(Elf_Phdr) * kernel_elf_header.e_phnum);

    // The kernel is loaded at physical 2 MiB; its default virtual base is the
    // same offset within the kernel mapping region.
    FlatPtr kernel_physical_base = (FlatPtr)0x200000;
    FlatPtr default_kernel_load_base = KERNEL_MAPPING_BASE + kernel_physical_base;
    FlatPtr kernel_load_base = default_kernel_load_base;

    // KASLR: unless disabled on the kernel command line, slide the virtual
    // load base by a random amount, then round down to a 2 MiB boundary.
    if (__builtin_strstr(kernel_cmdline, "disable_kaslr") == nullptr) {
        FlatPtr maximum_offset = (FlatPtr)KERNEL_PD_SIZE - MAX_KERNEL_SIZE - 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
#ifdef KERNEL_ADDRESS_SANITIZER_ENABLED
        // To allow for easy mapping between the kernel virtual addresses and KASAN shadow memory,
        // we map shadow memory at the very end of the virtual range, so that we can index into it
        // using just an offset. To ensure this range is free when needed, we restrict the possible
        // KASLR range when KASAN is enabled to make sure we don't use the end of the virtual range.
        maximum_offset -= ceil_div(maximum_offset, 9ul);
#endif
        kernel_load_base += (generate_secure_seed() % maximum_offset);
        kernel_load_base &= ~(2 * MiB - 1); // keep 2 MiB alignment
    }

    // Find the highest virtual address any PT_LOAD segment will occupy, and
    // sanity-check that no segment (virtually or physically) would overlap
    // the pre-kernel image itself.
    FlatPtr kernel_load_end = 0;
    for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
        auto& kernel_program_header = kernel_program_headers[i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        auto start = kernel_load_base + kernel_program_header.p_vaddr;
        auto end = start + kernel_program_header.p_memsz;
        if (start < (FlatPtr)end_of_prekernel_image)
            halt();
        if (kernel_physical_base + kernel_program_header.p_paddr < (FlatPtr)end_of_prekernel_image)
            halt();
        if (end > kernel_load_end)
            kernel_load_end = end;
    }

    // align to 1GB
    FlatPtr kernel_mapping_base = kernel_load_base & ~(FlatPtr)0x3fffffff;
    VERIFY(kernel_load_base % 0x1000 == 0);
    VERIFY(kernel_load_base >= kernel_mapping_base + kernel_physical_base);

    // Hook the kernel page directory into the PDPT slot covering the 1 GiB
    // region of kernel_mapping_base. 0x3 = Present | Read/Write (x86 paging).
    int pdpt_flags = 0x3;
    boot_pdpt[(kernel_mapping_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;

    // First 2 MiB of the kernel mapping region goes through boot_pd_kernel_pt0.
    boot_pd_kernel[0] = (FlatPtr)boot_pd_kernel_pt0 | 0x3;

    // Point one page table (512 PTEs = 2 MiB) at each 2 MiB slice spanned by
    // the kernel image's virtual range [kernel_load_base, kernel_load_end].
    for (FlatPtr vaddr = kernel_load_base; vaddr <= kernel_load_end; vaddr += PAGE_SIZE * 512)
        boot_pd_kernel[(vaddr - kernel_mapping_base) >> 21] = (FlatPtr)(&boot_pd_kernel_image_pts[(vaddr - kernel_load_base) >> 12]) | 0x3;

    __builtin_memset(boot_pd_kernel_pt0, 0, sizeof(boot_pd_kernel_pt0));
    // The pre-kernel image must fit inside what a single page table can map.
    VERIFY((size_t)end_of_prekernel_image < array_size(boot_pd_kernel_pt0) * PAGE_SIZE);

    /* pseudo-identity map 0M - end_of_prekernel_image */
    for (size_t i = 0; i < (FlatPtr)end_of_prekernel_image / PAGE_SIZE; i++)
        boot_pd_kernel_pt0[i] = i * PAGE_SIZE | 0x3;

    // Fill in PTEs mapping each PT_LOAD segment's virtual pages onto the
    // kernel's physical pages (p_paddr relative to kernel_physical_base).
    __builtin_memset(boot_pd_kernel_image_pts, 0, sizeof(boot_pd_kernel_image_pts));
    for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
        auto& kernel_program_header = kernel_program_headers[i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        for (FlatPtr offset = 0; offset < kernel_program_header.p_memsz; offset += PAGE_SIZE) {
            auto pte_index = ((kernel_load_base & 0x1fffff) + kernel_program_header.p_vaddr + offset) >> 12;
            boot_pd_kernel_image_pts[pte_index] = (kernel_physical_base + kernel_program_header.p_paddr + offset) | 0x3;
        }
    }

    // Map the dedicated last-slot page table (handed to the kernel below).
    boot_pd_kernel[511] = (FlatPtr)boot_pd_kernel_pt1023 | 0x3;

    // Fill-in multiboot-related info before loading kernel as to avoid accidentally
    // overwriting mbi end as to avoid to check whether it's mapped after reloading page tables.
    BootInfo info {};

    // Translate a pre-kernel pointer into the kernel's virtual address space.
    auto adjust_by_mapping_base = [kernel_mapping_base](auto ptr) {
        return (decltype(ptr))((FlatPtr)ptr + kernel_mapping_base);
    };

    info.multiboot_flags = multiboot_info_ptr->flags;
    info.multiboot_memory_map = adjust_by_mapping_base((FlatPtr)multiboot_info_ptr->mmap_addr);
    info.multiboot_memory_map_count = multiboot_info_ptr->mmap_length / sizeof(multiboot_memory_map_t);
    if (initrd_module_start != 0 && initrd_module_end != 0) {
        info.multiboot_module_physical_ptr = initrd_module_start;
        info.multiboot_module_length = initrd_module_end - initrd_module_start;
    }
    // Pass through framebuffer details only if the bootloader provided them.
    if ((multiboot_info_ptr->flags & MULTIBOOT_INFO_FRAMEBUFFER_INFO) != 0) {
        info.multiboot_framebuffer_addr = multiboot_info_ptr->framebuffer_addr;
        info.multiboot_framebuffer_pitch = multiboot_info_ptr->framebuffer_pitch;
        info.multiboot_framebuffer_width = multiboot_info_ptr->framebuffer_width;
        info.multiboot_framebuffer_height = multiboot_info_ptr->framebuffer_height;
        info.multiboot_framebuffer_bpp = multiboot_info_ptr->framebuffer_bpp;
        info.multiboot_framebuffer_type = multiboot_info_ptr->framebuffer_type;
    }

    // Activate the page tables built above.
    reload_cr3();

    // Copy each PT_LOAD segment into place through its new virtual mapping.
    // When the physical destination sits at/above the image source, iterate
    // the segments in reverse so overlapping ranges aren't clobbered.
    int backwards = kernel_physical_base >= (FlatPtr)kernel_image;
    for (ssize_t i = 0; i < kernel_elf_header.e_phnum; i++) {
        auto& kernel_program_header = kernel_program_headers[backwards ? kernel_elf_header.e_phnum - 1 - i : i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        memmove_virt((u8*)kernel_load_base + kernel_program_header.p_vaddr,
            kernel_physical_base + kernel_program_header.p_vaddr,
            kernel_image + kernel_program_header.p_offset, kernel_program_header.p_filesz);
    }

    // Zero each segment's tail beyond the file data (p_memsz - p_filesz),
    // i.e. the BSS portion.
    for (ssize_t i = kernel_elf_header.e_phnum - 1; i >= 0; i--) {
        auto& kernel_program_header = kernel_program_headers[i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        __builtin_memset((u8*)kernel_load_base + kernel_program_header.p_vaddr + kernel_program_header.p_filesz, 0, kernel_program_header.p_memsz - kernel_program_header.p_filesz);
    }

    // Describe the boot environment for the kernel proper.
    info.start_of_prekernel_image = (PhysicalPtr)start_of_prekernel_image;
    info.end_of_prekernel_image = (PhysicalPtr)end_of_prekernel_image;
    info.physical_to_virtual_offset = kernel_load_base - kernel_physical_base;
    info.kernel_mapping_base = kernel_mapping_base;
    info.kernel_load_base = kernel_load_base;
#if ARCH(X86_64)
    info.gdt64ptr = (PhysicalPtr)gdt64ptr;
    info.code64_sel = code64_sel;
    info.boot_pml4t = (PhysicalPtr)boot_pml4t;
#endif
    info.boot_pdpt = (PhysicalPtr)boot_pdpt;
    info.boot_pd0 = (PhysicalPtr)boot_pd0;
    info.boot_pd_kernel = (PhysicalPtr)boot_pd_kernel;
    info.boot_pd_kernel_pt1023 = (FlatPtr)adjust_by_mapping_base(boot_pd_kernel_pt1023);
    info.kernel_cmdline = (FlatPtr)adjust_by_mapping_base(kernel_cmdline);

    // Rebase the stack pointer into the kernel's high virtual mapping by
    // adding kernel_mapping_base to %rsp. From here on, locals (like `info`)
    // must be accessed via adjust_by_mapping_base'd pointers when passed on.
    asm(
        "mov %0, %%rax\n"
        "add %%rax, %%rsp" ::"g"(kernel_mapping_base)
        : "ax");

    // unmap the 0-1MB region
    for (size_t i = 0; i < 256; i++)
        boot_pd0_pts[i] = 0;

    // unmap the end_of_prekernel_image - MAX_KERNEL_SIZE region
    for (FlatPtr vaddr = (FlatPtr)end_of_prekernel_image; vaddr < MAX_KERNEL_SIZE; vaddr += PAGE_SIZE)
        boot_pd0_pts[vaddr >> 12] = 0;
    reload_cr3();

    // Fix up the kernel's relative relocations for its randomized base, then
    // tail-jump into its ELF entry point with a pointer to BootInfo translated
    // into the kernel's own mapping.
    ELF::perform_relative_relocations(kernel_load_base);
    void (*entry)(BootInfo const&) = (void (*)(BootInfo const&))(kernel_load_base + kernel_elf_header.e_entry);
    entry(*adjust_by_mapping_base(&info));
    __builtin_unreachable();
}
// Produce the seed used for the KASLR slide in init().
// Mixes (via XOR) whatever hardware entropy sources the CPU advertises through
// CPUID — TSC, RDRAND, RDSEED — plus two boot-variable multiboot fields.
// NOTE(review): the accumulator is u32, so the returned u64 carries at most
// 32 bits of entropy and any 64-bit source values are truncated — presumably
// acceptable for the KASLR offset range, but worth confirming.
u64 generate_secure_seed()
{
    u32 seed = 0xFEEBDAED; // fixed fallback pattern if no sources contribute
#if ARCH(X86_64)
    CPUID processor_info(0x1);
    if (processor_info.edx() & (1 << 4)) // TSC
        seed ^= read_tsc();
    if (processor_info.ecx() & (1 << 30)) // RDRAND
        seed ^= read_rdrand();
    CPUID extended_features(0x7);
    if (extended_features.ebx() & (1 << 18)) // RDSEED
        seed ^= read_rdseed();
#else
#    warning No native randomness source available for this architecture
#endif
    // Fold in bootloader-dependent values for a little extra variability.
    seed ^= multiboot_info_ptr->mods_addr;
    seed ^= multiboot_info_ptr->framebuffer_addr;
    return seed;
}
  239. // Define some Itanium C++ ABI methods to stop the linker from complaining.
  240. // If we actually call these something has gone horribly wrong
  241. void* __dso_handle __attribute__((visibility("hidden")));
  242. }