Kernel: Introduce basic pre-kernel environment

This implements a simple bootloader that is capable of loading ELF64
kernel images. QEMU or GRUB loads the kernel image from disk and passes
it to our bootloader as a Multiboot module.

The bootloader then parses the ELF image, builds page tables for it, and
copies its PT_LOAD segments into place. The kernel's entry point is now
a C++ function, init(BootInfo const&), compiled as architecture-native
code.

Co-authored-by: Liav A <liavalb@gmail.com>
Gunnar Beutner, 2021-07-18 14:47:32 +02:00, committed by Andreas Kling
parent 357ddd393e
commit 7e94b090fe
30 changed files with 1207 additions and 181 deletions
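A condensed sketch of the handoff this commit implements, using names from
the diff below (map_and_copy_load_segments and collect_boot_info are
hypothetical condensations of the code shown in init.cpp further down):

// Prekernel entry, reached from boot.S: QEMU/GRUB has loaded the kernel
// ELF as a Multiboot module and left the module list in multiboot_info_ptr.
extern "C" [[noreturn]] void init()
{
    auto* kernel_module = (multiboot_module_entry_t*)(FlatPtr)multiboot_info_ptr->mods_addr;
    u8* kernel_image = (u8*)(FlatPtr)kernel_module->start; // the ELF64 kernel image
    auto* ehdr = (ElfW(Ehdr)*)kernel_image;                // parse the ELF header
    map_and_copy_load_segments(ehdr, kernel_image);        // build page tables, copy PT_LOADs (hypothetical helper)
    BootInfo info = collect_boot_info();                   // page table addrs, cmdline, ... (hypothetical helper)
    auto entry = (void (*)(BootInfo const&))ehdr->e_entry; // the kernel's C++ init()
    entry(info);                                           // never returns
    __builtin_unreachable();
}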

@@ -88,7 +88,7 @@
# define PAGE_SIZE sysconf(_SC_PAGESIZE)
#endif
#ifndef _BOOTLOADER
#ifdef __cplusplus
ALWAYS_INLINE int count_trailing_zeroes_32(unsigned int val)
{
# if defined(__GNUC__) || defined(__clang__)

@@ -1,4 +1,3 @@
#define _BOOTLOADER
#include <AK/Platform.h>
#include <Kernel/Sections.h>

@@ -1,4 +1,3 @@
#define _BOOTLOADER
#include <Kernel/Sections.h>
.extern init_ap
@@ -110,7 +109,7 @@ apic_ap_start32_2:
/* push the Processor pointer this CPU is going to use */
movl (ap_cpu_init_processor_info_array - apic_ap_start)(%ebp), %eax
addl $KERNEL_BASE, %eax
addl kernel_base, %eax
movl 0(%eax, %esi, 4), %eax
push %eax

@@ -1,6 +1,19 @@
#define _BOOTLOADER
#include <Kernel/Sections.h>
.section .text
.global gdt64ptr
gdt64ptr:
#if ARCH(X86_64)
.quad 0
#else
.int 0
#endif
.global code64_sel
code64_sel:
.short 0
.extern init_ap
.type init_ap, @function
@@ -92,16 +105,15 @@ apic_ap_start32:
movl %eax, %cr0
/* load the temporary 64-bit gdt from boot that points above 3GB */
mov $(gdt64ptr - KERNEL_BASE), %eax
// FIXME: uncomment this
//mov gdt64ptr, %eax
lgdt (%eax)
/* jump above 3GB into our identity mapped area now */
ljmpl $code64_sel, $(apic_ap_start64 - apic_ap_start + 0xc0008000)
// FIXME: this assumes that code64_sel is always 8
ljmpl $8, $(apic_ap_start64 - apic_ap_start + 0xc0008000)
.code64
apic_ap_start64:
movabs $gdt64ptr, %rax
lgdt (%rax)
mov $0, %ax
mov %ax, %ss
mov %ax, %ds
@@ -129,7 +141,8 @@ apic_ap_start64:
/* push the Processor pointer this CPU is going to use */
movq (ap_cpu_init_processor_info_array - apic_ap_start)(%ebp), %rax
movq $KERNEL_BASE, %r8
movabsq $(kernel_base), %r8
movq (%r8), %r8
addq %r8, %rax
movq 0(%rax, %rsi, 4), %rax
push %rax
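For context on the FIXME above: an x86 segment selector encodes
(index << 3) | table_indicator << 2 | rpl, so the first descriptor after
the null entry is always selector 8. A small sketch (not part of the
commit):

// Why hardcoding $8 works as long as gdt64code stays the second GDT entry
// (index 1, GDT table indicator 0, ring 0).
constexpr unsigned short selector(unsigned index, unsigned ti = 0, unsigned rpl = 0)
{
    return static_cast<unsigned short>((index << 3) | (ti << 2) | rpl);
}
static_assert(selector(1) == 8, "code64_sel is expected to be 8");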

@@ -130,6 +130,7 @@ set(KERNEL_SOURCES
KLexicalPath.cpp
KString.cpp
KSyms.cpp
MiniStdLib.cpp
Mutex.cpp
Net/E1000ENetworkAdapter.cpp
Net/E1000NetworkAdapter.cpp
@@ -289,8 +290,6 @@ set(KERNEL_SOURCES
set(KERNEL_SOURCES
${KERNEL_SOURCES}
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/ASM_wrapper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Boot/boot.S
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Boot/multiboot.S
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/CPU.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Interrupts.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Processor.cpp
@@ -378,8 +377,6 @@ add_compile_definitions(KERNEL)
# It's needed because CLion doesn't understand the way we switch compilers mid-build.
add_compile_definitions(__serenity__)
add_link_options(LINKER:-T ${CMAKE_CURRENT_SOURCE_DIR}/linker.ld -nostdlib)
add_library(kernel_heap STATIC ${KERNEL_HEAP_SOURCES})
if (${CMAKE_HOST_SYSTEM_NAME} MATCHES SerenityOS)
@@ -394,28 +391,22 @@ else()
link_directories(${TOOLCHAIN_ROOT}/lib/gcc/${SERENITY_ARCH}-pc-serenity/${GCC_VERSION}/)
endif()
if ("${SERENITY_ARCH}" STREQUAL "i686")
set(KERNEL_TARGET Kernel32)
else()
set(KERNEL_TARGET Kernel64)
endif()
add_executable(Kernel ${SOURCES})
add_dependencies(Kernel generate_EscapeSequenceStateMachine.h)
add_executable(${KERNEL_TARGET} ${SOURCES})
add_dependencies(${KERNEL_TARGET} generate_EscapeSequenceStateMachine.h)
set_target_properties(${KERNEL_TARGET} PROPERTIES LINK_DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/linker.ld)
target_link_options(Kernel PRIVATE LINKER:-T ${CMAKE_CURRENT_SOURCE_DIR}/linker.ld -nostdlib)
set_target_properties(Kernel PROPERTIES LINK_DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/linker.ld)
if (ENABLE_KERNEL_LTO)
include(CheckIPOSupported)
check_ipo_supported()
set_property(TARGET ${KERNEL_TARGET} PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
set_property(TARGET Kernel PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
endif()
target_link_libraries(${KERNEL_TARGET} kernel_heap gcc)
add_dependencies(${KERNEL_TARGET} kernel_heap)
target_link_libraries(Kernel kernel_heap gcc)
add_dependencies(Kernel kernel_heap)
add_custom_command(
TARGET ${KERNEL_TARGET} POST_BUILD
COMMAND ${CMAKE_OBJCOPY} -O elf32-i386 ${CMAKE_CURRENT_BINARY_DIR}/${KERNEL_TARGET} ${CMAKE_CURRENT_BINARY_DIR}/Kernel
TARGET Kernel POST_BUILD
COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/mkmap.sh
COMMAND ${CMAKE_COMMAND} -E env OBJCOPY=${CMAKE_OBJCOPY} sh ${CMAKE_CURRENT_SOURCE_DIR}/embedmap.sh
COMMAND ${CMAKE_OBJCOPY} --only-keep-debug Kernel Kernel.debug
@@ -431,4 +422,5 @@ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/kernel.map" DESTINATION res)
serenity_install_headers(Kernel)
serenity_install_sources(Kernel)
add_subdirectory(Prekernel)
add_subdirectory(Modules)

@@ -118,7 +118,7 @@ NEVER_INLINE static void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksym
if (use_ksyms) {
FlatPtr copied_stack_ptr[2];
for (FlatPtr* stack_ptr = (FlatPtr*)base_pointer; stack_ptr && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (FlatPtr*)copied_stack_ptr[0]) {
if ((FlatPtr)stack_ptr < KERNEL_BASE)
if ((FlatPtr)stack_ptr < kernel_base)
break;
void* fault_at;

Kernel/MiniStdLib.cpp (new file)

@@ -0,0 +1,92 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/StdLib.h>
extern "C" {
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
size_t dest = (size_t)dest_ptr;
size_t src = (size_t)src_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
#if ARCH(I386)
asm volatile(
"rep movsl\n"
: "=S"(src), "=D"(dest)
: "S"(src), "D"(dest), "c"(size_ts)
: "memory");
#else
asm volatile(
"rep movsq\n"
: "=S"(src), "=D"(dest)
: "S"(src), "D"(dest), "c"(size_ts)
: "memory");
#endif
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;
}
asm volatile(
"rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
: "memory");
return dest_ptr;
}
void* memmove(void* dest, const void* src, size_t n)
{
if (dest < src)
return memcpy(dest, src, n);
u8* pd = (u8*)dest;
const u8* ps = (const u8*)src;
for (pd += n, ps += n; n--;)
*--pd = *--ps;
return dest;
}
void* memset(void* dest_ptr, int c, size_t n)
{
size_t dest = (size_t)dest_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
size_t expanded_c = explode_byte((u8)c);
#if ARCH(I386)
asm volatile(
"rep stosl\n"
: "=D"(dest)
: "D"(dest), "c"(size_ts), "a"(expanded_c)
: "memory");
#else
asm volatile(
"rep stosq\n"
: "=D"(dest)
: "D"(dest), "c"(size_ts), "a"(expanded_c)
: "memory");
#endif
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;
}
asm volatile(
"rep stosb\n"
: "=D"(dest), "=c"(n)
: "0"(dest), "1"(n), "a"(c)
: "memory");
return dest_ptr;
}
size_t strlen(const char* str)
{
size_t len = 0;
while (*(str++))
++len;
return len;
}
}
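The aligned fast path in memset above splats the fill byte across a whole
machine word via AK's explode_byte. A minimal equivalent (a sketch, not
AK's actual definition, which lives in AK/Memory.h):

// For c = 0xAB on x86_64 this yields 0xABABABABABABABAB, letting
// rep stosq fill eight bytes per iteration instead of one.
static inline size_t explode_byte_sketch(unsigned char c)
{
    size_t word = c;
    for (size_t shift = 8; shift < sizeof(size_t) * 8; shift *= 2)
        word |= word << shift; // double the repeated prefix each pass
    return word;
}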

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Platform.h>
#include <Kernel/Multiboot.h>
namespace Kernel {
struct [[gnu::packed]] BootInfo {
u8 const* start_of_prekernel_image;
u8 const* end_of_prekernel_image;
FlatPtr kernel_base;
multiboot_info* multiboot_info_ptr;
#if ARCH(X86_64)
u32 gdt64ptr;
u16 code64_sel;
FlatPtr boot_pml4t;
#endif
FlatPtr boot_pdpt;
FlatPtr boot_pd0;
FlatPtr boot_pd_kernel;
FlatPtr boot_pd_kernel_pt1023;
char const* kernel_cmdline;
};
}
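Because the Prekernel and the kernel are linked as separate binaries,
BootInfo is the ABI between them, which is why the struct is
[[gnu::packed]]. A hypothetical layout check (not in the commit) of the
kind both sides must implicitly agree on:

// Hypothetical sanity check: kernel_base follows the two image pointers
// with no padding, on both i686 (offset 8) and x86_64 (offset 16).
static_assert(__builtin_offsetof(Kernel::BootInfo, kernel_base) == 2 * sizeof(u8 const*));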

@@ -0,0 +1,28 @@
set(SOURCES
boot.S
multiboot.S
init.cpp
UBSanitizer.cpp
../MiniStdLib.cpp
)
if ("${SERENITY_ARCH}" STREQUAL "i686")
set(PREKERNEL_TARGET Prekernel32)
else()
set(PREKERNEL_TARGET Prekernel64)
endif()
add_executable(${PREKERNEL_TARGET} ${SOURCES})
target_link_options(${PREKERNEL_TARGET} PRIVATE LINKER:-T ${CMAKE_CURRENT_SOURCE_DIR}/linker.ld -nostdlib)
set_target_properties(${PREKERNEL_TARGET} PROPERTIES LINK_DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/linker.ld)
target_link_libraries(${PREKERNEL_TARGET} gcc supc++)
add_custom_command(
TARGET ${PREKERNEL_TARGET} POST_BUILD
COMMAND ${TOOLCHAIN_PREFIX}objcopy -O elf32-i386 ${CMAKE_CURRENT_BINARY_DIR}/${PREKERNEL_TARGET} ${CMAKE_CURRENT_BINARY_DIR}/Prekernel
BYPRODUCTS ${CMAKE_CURRENT_BINARY_DIR}/Prekernel
)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/Prekernel" DESTINATION boot)

@@ -0,0 +1,9 @@
/*
* Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#define MAX_KERNEL_SIZE 0x3000000

@@ -0,0 +1,142 @@
/*
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/UBSanitizer.h>
#include <Kernel/Arch/x86/Processor.h>
#include <Kernel/KSyms.h>
using namespace AK::UBSanitizer;
bool AK::UBSanitizer::g_ubsan_is_deadly { true };
extern "C" {
static void print_location(const SourceLocation&)
{
asm volatile("cli; hlt");
}
void __ubsan_handle_load_invalid_value(const InvalidValueData&, ValueHandle) __attribute__((used));
void __ubsan_handle_load_invalid_value(const InvalidValueData& data, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_nonnull_arg(const NonnullArgData&) __attribute__((used));
void __ubsan_handle_nonnull_arg(const NonnullArgData& data)
{
print_location(data.location);
}
void __ubsan_handle_nullability_arg(const NonnullArgData&) __attribute__((used));
void __ubsan_handle_nullability_arg(const NonnullArgData& data)
{
print_location(data.location);
}
void __ubsan_handle_nonnull_return_v1(const NonnullReturnData&, const SourceLocation&) __attribute__((used));
void __ubsan_handle_nonnull_return_v1(const NonnullReturnData&, const SourceLocation& location)
{
print_location(location);
}
void __ubsan_handle_nullability_return_v1(const NonnullReturnData& data, const SourceLocation& location) __attribute__((used));
void __ubsan_handle_nullability_return_v1(const NonnullReturnData&, const SourceLocation& location)
{
print_location(location);
}
void __ubsan_handle_vla_bound_not_positive(const VLABoundData&, ValueHandle) __attribute__((used));
void __ubsan_handle_vla_bound_not_positive(const VLABoundData& data, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_add_overflow(const OverflowData&, ValueHandle lhs, ValueHandle rhs) __attribute__((used));
void __ubsan_handle_add_overflow(const OverflowData& data, ValueHandle, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_sub_overflow(const OverflowData&, ValueHandle lhs, ValueHandle rhs) __attribute__((used));
void __ubsan_handle_sub_overflow(const OverflowData& data, ValueHandle, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_negate_overflow(const OverflowData&, ValueHandle) __attribute__((used));
void __ubsan_handle_negate_overflow(const OverflowData& data, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_mul_overflow(const OverflowData&, ValueHandle lhs, ValueHandle rhs) __attribute__((used));
void __ubsan_handle_mul_overflow(const OverflowData& data, ValueHandle, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_shift_out_of_bounds(const ShiftOutOfBoundsData&, ValueHandle lhs, ValueHandle rhs) __attribute__((used));
void __ubsan_handle_shift_out_of_bounds(const ShiftOutOfBoundsData& data, ValueHandle, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_divrem_overflow(const OverflowData&, ValueHandle lhs, ValueHandle rhs) __attribute__((used));
void __ubsan_handle_divrem_overflow(const OverflowData& data, ValueHandle, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_out_of_bounds(const OutOfBoundsData&, ValueHandle) __attribute__((used));
void __ubsan_handle_out_of_bounds(const OutOfBoundsData& data, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_type_mismatch_v1(const TypeMismatchData&, ValueHandle) __attribute__((used));
void __ubsan_handle_type_mismatch_v1(const TypeMismatchData& data, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_alignment_assumption(const AlignmentAssumptionData&, ValueHandle, ValueHandle, ValueHandle) __attribute__((used));
void __ubsan_handle_alignment_assumption(const AlignmentAssumptionData& data, ValueHandle, ValueHandle, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_builtin_unreachable(const UnreachableData&) __attribute__((used));
void __ubsan_handle_builtin_unreachable(const UnreachableData& data)
{
print_location(data.location);
}
void __ubsan_handle_missing_return(const UnreachableData&) __attribute__((used));
void __ubsan_handle_missing_return(const UnreachableData& data)
{
print_location(data.location);
}
void __ubsan_handle_implicit_conversion(const ImplicitConversionData&, ValueHandle, ValueHandle) __attribute__((used));
void __ubsan_handle_implicit_conversion(const ImplicitConversionData& data, ValueHandle, ValueHandle)
{
print_location(data.location);
}
void __ubsan_handle_invalid_builtin(const InvalidBuiltinData) __attribute__((used));
void __ubsan_handle_invalid_builtin(const InvalidBuiltinData data)
{
print_location(data.location);
}
void __ubsan_handle_pointer_overflow(const PointerOverflowData&, ValueHandle, ValueHandle) __attribute__((used));
void __ubsan_handle_pointer_overflow(const PointerOverflowData& data, ValueHandle, ValueHandle)
{
print_location(data.location);
}
}
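For context: the compiler emits calls to these __ubsan_handle_* hooks at
each instrumented operation, passing static data that describes the source
location; in the Prekernel every handler funnels into print_location(),
which currently just executes cli; hlt. A hypothetical trigger (not part
of the commit):

// With -fsanitize=signed-integer-overflow, the compiler lowers this
// addition to an overflow-checked sequence that calls
// __ubsan_handle_add_overflow() with a static OverflowData.
int add(int a, int b) { return a + b; } // halts if called with INT_MAX, 1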

Kernel/Prekernel/boot.S (new file)

@@ -0,0 +1,509 @@
#include <AK/Platform.h>
#include <Kernel/Prekernel/Prekernel.h>
.code32
.section .stack, "aw", @nobits
stack_bottom:
.skip 32768
stack_top:
.global kernel_cmdline
kernel_cmdline:
.skip 4096
.section .page_tables, "aw", @nobits
.align 4096
#if ARCH(X86_64)
.global boot_pml4t
boot_pml4t:
.skip 4096
#endif
.global boot_pdpt
boot_pdpt:
.skip 4096
.global boot_pd0
boot_pd0:
.skip 4096
.global boot_pd0_pts
boot_pd0_pts:
.skip 4096 * (MAX_KERNEL_SIZE >> 21)
.global boot_pd_kernel
boot_pd_kernel:
.skip 4096
.global boot_pd_kernel_pts
boot_pd_kernel_pts:
.skip 4096 * (MAX_KERNEL_SIZE >> 21)
.global boot_pd_kernel_pt1023
boot_pd_kernel_pt1023:
.skip 4096
.section .boot_text, "ax"
.global start
.type start, @function
.extern init
.type init, @function
.global reload_cr3
.type reload_cr3, @function
.extern multiboot_info_ptr
.type multiboot_info_ptr, @object
/*
construct the following (64-bit PML4T) page table layout:
(the PML4T part is not used for 32-bit x86)
pml4t:
0: pdpt (0-512GB)
pdpt
0: boot_pd0 (0-1GB)
1: n/a (1-2GB)
2: n/a (2-3GB)
3: n/a (3-4GB)
boot_pd0 : 512 PDEs
boot_pd0_pts (0MB - MAX_KERNEL_SIZE) (id 512 4KB pages)
the page tables each contain 512 PTEs that map individual 4KB pages
*/
#if ARCH(X86_64)
gdt64:
.quad 0
gdt64code:
.quad (1<<43) | (1<<44) | (1<<47) | (1<<53) /* executable, code segment, present, 64-bit */
.global gdt64ptr
gdt64ptr:
.short . - gdt64 - 1
.quad gdt64
.set code64_sel_value, gdt64code - gdt64
.global code64_sel
code64_sel:
.short code64_sel_value
#endif
start:
jmp real_start
/*
this function assumes that paging is disabled (or everything is mapped 1:1)
param 1: pointer to string ended with null terminator (C string)
*/
print_and_halt:
/* from now on, we don't really care about booting because we are missing required CPU features such as PAE or long mode.
the flow from now is like so:
1. Copy all necessary parts to low memory section in RAM
2. Jump to that section
3. In that section we do:
a. exit protected mode to pure 16 bit real mode
b. load the "<missing feature> is not supported" String, call the BIOS print to screen service
c. halt
*/
.equ COPIED_STRING_LOCATION, 0x400
.equ GDT_REAL_MODE_LOCATION, 0x45000
.equ EXITING_PROTECTED_MODE_CODE_LOCATION, 0x10000
.equ REAL_MODE_CODE, 0x500
.equ PROTECTED_MODE_16_BIT_CODE, 0x600
movl %esp, %ebp
movl 4(%ebp), %edi
/* Copy string to low memory section */
movl %edi, %esi
xor %ecx, %ecx
pushl %eax
pushl %edi
check_string_length:
movb (%edi), %ah
cmp $0, %ah
je check_string_length_exit
inc %ecx
inc %edi
jmp check_string_length
check_string_length_exit:
popl %edi
popl %eax
/* source address of the code is ESI */
movw %cx, (COPIED_STRING_LOCATION)
mov $COPIED_STRING_LOCATION + 2, %edi /* destination address of the code */
rep movsb
/* Copy gdt_table_real_mode to low memory section */
movl $gdt_table_real_mode, %eax
movl $gdt_table_real_mode_end, %ebx
movl %ebx, %ecx
sub %eax, %ecx
mov %eax, %esi /* source address of the code */
mov $GDT_REAL_MODE_LOCATION, %edi /* destination address of the code */
rep movsb
/* Copy protected_mode_16_bit to real_mode to low memory section */
movl $protected_mode_16_bit, %eax
movl $real_mode, %ebx
movl %ebx, %ecx
sub %eax, %ecx
mov %eax, %esi /* source address of the code */
mov $PROTECTED_MODE_16_BIT_CODE, %edi /* destination address of the code */
rep movsb
/* Copy real_mode to end_of_print_and_halt_function to low memory section */
movl $real_mode, %eax
movl $end_of_print_and_halt_function, %ebx
movl %ebx, %ecx
sub %eax, %ecx
mov %eax, %esi /* source address of the code */
mov $REAL_MODE_CODE, %edi /* destination address of the code */
rep movsb
/* Copy all opcodes from exiting_real_mode label to protected_mode_16_bit label to low memory RAM */
movl $exiting_real_mode, %eax
movl $protected_mode_16_bit, %ebx
movl %ebx, %ecx
sub %eax, %ecx
mov %eax, %esi /* source address of the code */
mov $EXITING_PROTECTED_MODE_CODE_LOCATION, %edi /* destination address of the code */
pushl %edi
rep movsb
popl %edi
pushl %edi
ret
gdt_table_real_mode:
.quad 0 /* Empty entry */
.short 0xffff
.short 0
.byte 0
.byte 0b10011010
.byte 0b00001111
.byte 0x0
.short 0xffff
.short 0
.byte 0
.byte 0b10010010
.byte 0b00001111
.byte 0x0
gdt_table_real_mode_end:
no_long_mode_string:
.asciz "Your computer does not support long mode (64-bit mode). Halting!"
no_pae_string:
.asciz "Your computer does not support PAE. Halting!"
kernel_image_too_big_string:
.asciz "Error: Kernel Image too big for memory slot. Halting!"
/*
This part is completely standalone - it doesn't involve any location from this
near code. It uses arbitrary locations in the low memory section of the RAM.
We don't really worry about where are these locations, because we only want to quickly
print a string and halt.
*/
.code32
exiting_real_mode:
/* Build IDT pointer and load it */
mov $0x50000, %eax
pushl %eax
movl $0x3ff, 0(%eax)
add $2, %eax
movl $0, 0(%eax)
popl %eax
lidt (%eax)
/* Build GDT pointer and load it */
mov $0x40000, %eax
pushl %eax
movl $32, 0(%eax)
add $2, %eax
movl $GDT_REAL_MODE_LOCATION, 0(%eax)
popl %eax
lgdt (%eax)
/* far jump to protected_mode_16_bit in 0x5000 */
pushw $8
push $PROTECTED_MODE_16_BIT_CODE
lret
hlt
.code16
protected_mode_16_bit:
xor %eax, %eax
movl $0x10, %eax
movw %ax, %ds
and $0xFE, %al /* switch to pure real mode */
mov %eax, %cr0
mov $0x10, %eax
movl %eax, %cr0
pushw $0
push $REAL_MODE_CODE
lret
hlt
real_mode:
movw $0x7000, %ax
movl $0x0000, %esp
movw %ax, %ss
xor %ax, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
mov $0x3, %ax
int $0x10
movb $0x13, %ah
movb $0x0, %bh
movb $0xf, %bl
movw (COPIED_STRING_LOCATION), %cx
movw $0, %dx
movw $COPIED_STRING_LOCATION + 2, %bp
int $0x10
movl $0xdeadcafe, %ebx
cli
hlt
end_of_print_and_halt_function:
.code32
real_start:
cli
cld
mov $end_of_prekernel_image, %esi
cmp $MAX_KERNEL_SIZE, %esi
jbe kernel_not_too_large
movl $kernel_image_too_big_string, %esi
pushl %esi
call print_and_halt
/* We should not return, but just in case, halt */
hlt
kernel_not_too_large:
/* test for PAE presence, save the most important registers from corruption */
pushl %eax
pushl %edx
pushl %ebx
movl $0x1, %eax /* PAE presence is in CPUID input 0x1 */
cpuid
testl $(1 << 6), %edx /* Test if the PAE-bit, which is bit 6, is set in the edx register. */
jnz pae_supported /* If the bit is not set, there is no PAE capability. */
/* Since there is no PAE capability, halt with an error message */
movl $no_pae_string, %esi
pushl %esi
call print_and_halt
/* We should not return, but just in case, halt */
hlt
#if ARCH(X86_64)
pae_supported:
movl $0x80000001, %eax
cpuid
testl $(1 << 29), %edx /* Test if the LM-bit, which is bit 29, is set in the edx register. */
jnz long_mode_supported /* If LM-bit is not enabled, there is no long mode. */
/* Since there is no long mode, halt with an error message */
movl $no_long_mode_string, %esi
pushl %esi
call print_and_halt
/* We should not return, but just in case, halt */
hlt
/* If both PAE and long mode is supported, continue with booting the system */
long_mode_supported:
/* restore the pushed registers and continue with booting */
popl %ebx
popl %edx
popl %eax
#else
/* If PAE is supported, continue with booting the system */
pae_supported:
/* restore the pushed registers and continue with booting */
popl %ebx
popl %edx
popl %eax
#endif
/* We don't know where the bootloader might have put the command line.
* It might be at an inconvenient location that we're not about to map,
* so let's just copy it to a convenient location while we have the whole
* memory space identity-mapped anyway. :^)
*/
movl %ebx, %esi
addl $16, %esi
movl (%esi), %esi
movl $1024, %ecx
movl $kernel_cmdline, %edi
rep movsl
#if ARCH(X86_64)
/* clear pml4t */
movl $boot_pml4t, %edi
movl $1024, %ecx
xorl %eax, %eax
rep stosl
/* set up pml4t[0] */
movl $boot_pml4t, %edi
movl $boot_pdpt, 0(%edi)
/* R/W + Present */
orl $0x3, 0(%edi)
#endif
/* clear pdpt */
movl $boot_pdpt, %edi
movl $1024, %ecx
xorl %eax, %eax
rep stosl
/* set up pdpt[0] and pdpt[3] */
movl $boot_pdpt, %edi
#if ARCH(X86_64)
movl $(boot_pd0 + 3), 0(%edi)
#else
movl $(boot_pd0 + 1), 0(%edi)
#endif
/* clear pd0 */
movl $boot_pd0, %edi
movl $1024, %ecx
xorl %eax, %eax
rep stosl
/* clear pd0's PTs */
movl $boot_pd0_pts, %edi
movl $(1024 * (MAX_KERNEL_SIZE >> 21)), %ecx
xorl %eax, %eax
rep stosl
/* add boot_pd0_pts to boot_pd0 */
movl $(MAX_KERNEL_SIZE >> 21), %ecx
movl $boot_pd0, %edi
movl $boot_pd0_pts, %eax
1:
movl %eax, 0(%edi)
/* R/W + Present */
orl $0x3, 0(%edi)
addl $8, %edi
addl $4096, %eax
loop 1b
/* identity map the 0MB to MAX_KERNEL_SIZE range */
movl $(512 * (MAX_KERNEL_SIZE >> 21)), %ecx
movl $boot_pd0_pts, %edi
xorl %eax, %eax
1:
movl %eax, 0(%edi)
/* R/W + Present */
orl $0x3, 0(%edi)
addl $8, %edi
addl $4096, %eax
loop 1b
#if ARCH(X86_64)
/* point CR3 to PML4T */
movl $boot_pml4t, %eax
#else
/* point CR3 to PDPT */
movl $boot_pdpt, %eax
#endif
movl %eax, %cr3
/* enable PAE + PSE */
movl %cr4, %eax
orl $0x60, %eax
movl %eax, %cr4
#if ARCH(X86_64)
1:
/* Enter Long-mode! ref(https://wiki.osdev.org/Setting_Up_Long_Mode)*/
mov $0xC0000080, %ecx /* Set the C-register to 0xC0000080, which is the EFER MSR.*/
rdmsr /* Read from the model-specific register.*/
or $(1 << 8), %eax /* Set the LM-bit which is the 9th bit (bit 8).*/
wrmsr /* Write to the model-specific register.*/
#endif
/* enable PG */
movl %cr0, %eax
orl $0x80000000, %eax
movl %eax, %cr0
/* set up stack */
mov $stack_top, %esp
and $-16, %esp
#if ARCH(X86_64)
/* Now we are in 32-bit compatibility mode, We still need to load a 64-bit GDT */
mov $gdt64ptr, %eax
lgdt (%eax)
ljmpl $code64_sel_value, $1f
.code64
1:
movl %ebx, %ebx
movq %rbx, multiboot_info_ptr
mov $0, %ax
mov %ax, %ss
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
#else
movl %ebx, multiboot_info_ptr
#endif
call reload_cr3
call init
cli
loop:
hlt
jmp loop
reload_cr3:
#if ARCH(X86_64)
pushq %rax
mov %cr3, %rax
mov %rax, %cr3
popq %rax
#else
pushl %eax
movl %cr3, %eax
movl %eax, %cr3
popl %eax
#endif
ret
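The layout comment near the top of boot.S can be restated numerically; a
sketch (not part of the commit) of the index arithmetic, with FlatPtr as
in AK:

// With 4 KiB pages, each paging level consumes 9 bits of the virtual
// address above the 12-bit page offset.
constexpr size_t pml4_index(FlatPtr v) { return (v >> 39) & 0x1ff; } // 512 GiB per entry
constexpr size_t pdpt_index(FlatPtr v) { return (v >> 30) & 0x1ff; } // 1 GiB per entry
constexpr size_t pd_index(FlatPtr v)   { return (v >> 21) & 0x1ff; } // 2 MiB per entry
constexpr size_t pt_index(FlatPtr v)   { return (v >> 12) & 0x1ff; } // 4 KiB per entry

// boot.S reserves one 4 KiB page table per 2 MiB of kernel, hence the
// .skip 4096 * (MAX_KERNEL_SIZE >> 21) above: 0x3000000 >> 21 == 24 tables.
static_assert((0x3000000 >> 21) == 24);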

Kernel/Prekernel/init.cpp (new file)

@@ -0,0 +1,179 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
* Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Types.h>
#include <Kernel/Multiboot.h>
#include <Kernel/Prekernel/BootInfo.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/VirtualAddress.h>
#include <LibC/elf.h>
// Defined in the linker script
extern size_t __stack_chk_guard;
size_t __stack_chk_guard;
extern "C" [[noreturn]] void __stack_chk_fail();
extern "C" u8 start_of_prekernel_image[];
extern "C" u8 end_of_prekernel_image[];
extern "C" u8 gdt64ptr[];
extern "C" u16 code64_sel;
extern "C" u64 boot_pml4t[512];
extern "C" u64 boot_pdpt[512];
extern "C" u64 boot_pd0[512];
extern "C" u64 boot_pd0_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel[512];
extern "C" u64 boot_pd_kernel_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel_pt1023[512];
extern "C" char const kernel_cmdline[4096];
extern "C" void reload_cr3();
extern "C" {
multiboot_info_t* multiboot_info_ptr;
}
void __stack_chk_fail()
{
asm("ud2");
__builtin_unreachable();
}
namespace Kernel {
// boot.S expects these functions to exactly have the following signatures.
// We declare them here to ensure their signatures don't accidentally change.
extern "C" [[noreturn]] void init();
static void halt()
{
asm volatile("hlt");
}
// SerenityOS Pre-Kernel Environment C++ entry point :^)
//
// This is where C++ execution begins, after boot.S transfers control here.
//
extern "C" [[noreturn]] void init()
{
if (multiboot_info_ptr->mods_count < 1)
halt();
multiboot_module_entry_t* kernel_module = (multiboot_module_entry_t*)(FlatPtr)multiboot_info_ptr->mods_addr;
u8* kernel_image = (u8*)(FlatPtr)kernel_module->start;
ElfW(Ehdr)* kernel_elf_header = (ElfW(Ehdr)*)kernel_image;
ElfW(Phdr)* kernel_program_headers = (ElfW(Phdr)*)((char*)kernel_elf_header + kernel_elf_header->e_phoff);
FlatPtr kernel_load_base = kernel_program_headers[0].p_vaddr;
FlatPtr kernel_load_end = kernel_program_headers[kernel_elf_header->e_phnum - 1].p_vaddr;
// align to 1GB
kernel_load_base &= ~(FlatPtr)0x3fffffff;
if (kernel_program_headers[0].p_vaddr < (FlatPtr)end_of_prekernel_image)
halt();
if (kernel_program_headers[0].p_paddr < (FlatPtr)end_of_prekernel_image)
halt();
#if ARCH(I386)
int pdpt_flags = 0x1;
#else
int pdpt_flags = 0x3;
#endif
boot_pdpt[(kernel_load_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;
for (size_t i = 0; i <= (kernel_load_end - kernel_load_base) >> 21; i++)
boot_pd_kernel[i] = (FlatPtr)&boot_pd_kernel_pts[i * 512] | 0x3;
__builtin_memset(boot_pd_kernel_pts, 0, sizeof(boot_pd_kernel_pts));
/* pseudo-identity map 0M - end_of_prekernel_image */
for (size_t i = 0; i < (FlatPtr)end_of_prekernel_image / PAGE_SIZE; i++)
boot_pd_kernel_pts[i] = i * PAGE_SIZE | 0x3;
for (size_t i = 0; i < kernel_elf_header->e_phnum; i++) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
for (FlatPtr offset = 0; offset < kernel_program_header.p_memsz; offset += PAGE_SIZE) {
auto pte_index = (kernel_program_header.p_vaddr + offset - kernel_load_base) >> 12;
boot_pd_kernel_pts[pte_index] = (kernel_program_header.p_paddr + offset) | 0x3;
}
}
boot_pd_kernel[511] = (FlatPtr)boot_pd_kernel_pt1023 | 0x3;
reload_cr3();
for (ssize_t i = kernel_elf_header->e_phnum - 1; i >= 0; i--) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
__builtin_memmove((u8*)kernel_program_header.p_vaddr, kernel_image + kernel_program_header.p_offset, kernel_program_header.p_filesz);
}
for (ssize_t i = kernel_elf_header->e_phnum - 1; i >= 0; i--) {
auto& kernel_program_header = kernel_program_headers[i];
if (kernel_program_header.p_type != PT_LOAD)
continue;
__builtin_memset((u8*)kernel_program_header.p_vaddr + kernel_program_header.p_filesz, 0, kernel_program_header.p_memsz - kernel_program_header.p_filesz);
}
multiboot_info_ptr->mods_count--;
multiboot_info_ptr->mods_addr += sizeof(multiboot_module_entry_t);
auto adjust_by_load_base = [kernel_load_base](auto* ptr) {
return (decltype(ptr))((FlatPtr)ptr + kernel_load_base);
};
BootInfo info;
info.start_of_prekernel_image = adjust_by_load_base(start_of_prekernel_image);
info.end_of_prekernel_image = adjust_by_load_base(end_of_prekernel_image);
info.kernel_base = kernel_load_base;
info.multiboot_info_ptr = adjust_by_load_base(multiboot_info_ptr);
#if ARCH(X86_64)
info.gdt64ptr = (FlatPtr)gdt64ptr;
info.code64_sel = code64_sel;
info.boot_pml4t = (FlatPtr)adjust_by_load_base(boot_pml4t);
#endif
info.boot_pdpt = (FlatPtr)adjust_by_load_base(boot_pdpt);
info.boot_pd0 = (FlatPtr)adjust_by_load_base(boot_pd0);
info.boot_pd_kernel = (FlatPtr)adjust_by_load_base(boot_pd_kernel);
info.boot_pd_kernel_pt1023 = (FlatPtr)adjust_by_load_base(boot_pd_kernel_pt1023);
info.kernel_cmdline = adjust_by_load_base(kernel_cmdline);
asm(
#if ARCH(I386)
"add %0, %%esp"
#else
"add %0, %%rsp"
#endif
::"g"(kernel_load_base));
// unmap the 0-1MB region
for (size_t i = 0; i < 256; i++)
boot_pd0_pts[i] = 0;
// unmap the end_of_prekernel_image - MAX_KERNEL_SIZE region
for (FlatPtr vaddr = (FlatPtr)end_of_prekernel_image; vaddr < MAX_KERNEL_SIZE; vaddr += PAGE_SIZE)
boot_pd0_pts[vaddr >> 12 & 0x1ff] = 0;
void (*entry)(BootInfo const&) = (void (*)(BootInfo const&))kernel_elf_header->e_entry;
entry(*adjust_by_load_base(&info));
__builtin_unreachable();
}
// Define some Itanium C++ ABI methods to stop the linker from complaining.
// If we actually call these something has gone horribly wrong
void* __dso_handle __attribute__((visibility("hidden")));
}
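To make the mapping arithmetic in init() concrete, a worked example (not
in the commit), assuming a kernel linked at 0xc0200000:

// kernel_load_base aligns down to a 1 GiB boundary, the PDPT slot for the
// kernel comes from bits 30+, and each mapped page lands at
// (vaddr - kernel_load_base) >> 12 within boot_pd_kernel_pts.
constexpr FlatPtr vaddr = 0xc0200000;                  // hypothetical p_vaddr
constexpr FlatPtr base = vaddr & ~(FlatPtr)0x3fffffff; // align down to 1 GiB
static_assert(base == 0xc0000000);
static_assert(((base >> 30) & 0x1ff) == 3);            // boot_pdpt[3] = boot_pd_kernel
static_assert(((vaddr - base) >> 12) == 0x200);        // PTE index 512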

@@ -0,0 +1,54 @@
ENTRY(start)
PHDRS
{
boot_text PT_LOAD ;
boot_bss PT_LOAD ;
text PT_LOAD ;
data PT_LOAD ;
bss PT_LOAD ;
}
SECTIONS
{
. = 0x00100000;
start_of_prekernel_image = .;
.boot_text ALIGN(4K) : AT (ADDR(.boot_text))
{
KEEP(*(.boot_text))
KEEP(*(.multiboot))
} :boot_text
.boot_bss ALIGN(4K) (NOLOAD) : AT (ADDR(.boot_bss))
{
KEEP(*(.page_tables))
KEEP(*(.stack))
*(.super_pages)
} :boot_bss
.text ALIGN(4K) : AT (ADDR(.text))
{
start_of_kernel_text = .;
*(.text*)
} :text
.rodata ALIGN(4K) : AT (ADDR(.rodata))
{
*(.rodata*)
} :data
.data ALIGN(4K) : AT (ADDR(.data))
{
*(.data*)
} :data
.bss ALIGN(4K) (NOLOAD) : AT (ADDR(.bss))
{
*(COMMON)
*(.bss)
} :bss
end_of_prekernel_image = .;
}

@@ -370,7 +370,7 @@ void Process::crash(int signal, FlatPtr ip, bool out_of_memory)
if (out_of_memory) {
dbgln("\033[31;1mOut of memory\033[m, killing: {}", *this);
} else {
if (ip >= KERNEL_BASE && g_kernel_symbols_available) {
if (ip >= kernel_base && g_kernel_symbols_available) {
auto* symbol = symbolicate_kernel_address(ip);
dbgln("\033[31;1m{:p} {} +{}\033[0m\n", ip, (symbol ? symbol->name : "(k?)"), (symbol ? ip - symbol->address : 0));
} else {

@@ -7,21 +7,21 @@
#pragma once
#include <AK/Platform.h>
#ifdef __cplusplus
# include <AK/Types.h>
#endif
#define READONLY_AFTER_INIT __attribute__((section(".ro_after_init")))
#define UNMAP_AFTER_INIT NEVER_INLINE __attribute__((section(".unmap_after_init")))
#define KERNEL_BASE 0xC0000000
#define KERNEL_PD_OFFSET 0x3000000
#define KERNEL_PD_END (KERNEL_BASE + 0x31000000)
#define KERNEL_PT1024_BASE (KERNEL_BASE + 0x3FE00000)
#ifdef __cplusplus
extern "C" FlatPtr kernel_base;
#endif
#define KERNEL_PD_END (kernel_base + 0x31000000)
#define KERNEL_PT1024_BASE (kernel_base + 0x3FE00000)
#define KERNEL_QUICKMAP_PT (KERNEL_PT1024_BASE + 0x6000)
#define KERNEL_QUICKMAP_PD (KERNEL_PT1024_BASE + 0x7000)
#define KERNEL_QUICKMAP_PER_CPU_BASE (KERNEL_PT1024_BASE + 0x8000)
#define KERNEL_PHYSICAL_PAGES_BASE (KERNEL_BASE + KERNEL_PD_OFFSET)
#ifdef __cplusplus
static_assert(KERNEL_BASE % 0x1000000 == 0);
#endif
#define USER_RANGE_CEILING 0xBE000000
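These macro changes are the heart of the commit: KERNEL_BASE used to be a
compile-time constant, while kernel_base is a global the kernel fills in
from BootInfo during init() (shown further down in this diff), so every
derived address is computed at runtime. A hypothetical illustration (not
in the commit):

// KERNEL_QUICKMAP_PD is no longer a constant expression; each use reads
// the kernel_base global, which init() sets from boot_info.kernel_base.
extern "C" FlatPtr kernel_base;
FlatPtr quickmap_pd_vaddr()
{
    return (kernel_base + 0x3FE00000) + 0x7000; // KERNEL_QUICKMAP_PD, per the macros above
}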

@@ -236,48 +236,6 @@ bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
return true;
}
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
size_t dest = (size_t)dest_ptr;
size_t src = (size_t)src_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
#if ARCH(I386)
asm volatile(
"rep movsl\n"
: "=S"(src), "=D"(dest)
: "S"(src), "D"(dest), "c"(size_ts)
: "memory");
#else
asm volatile(
"rep movsq\n"
: "=S"(src), "=D"(dest)
: "S"(src), "D"(dest), "c"(size_ts)
: "memory");
#endif
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;
}
asm volatile(
"rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
: "memory");
return dest_ptr;
}
void* memmove(void* dest, const void* src, size_t n)
{
if (dest < src)
return memcpy(dest, src, n);
u8* pd = (u8*)dest;
const u8* ps = (const u8*)src;
for (pd += n, ps += n; n--;)
*--pd = *--ps;
return dest;
}
const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
return AK::memmem(haystack, haystack_length, needle, needle_length);
@@ -297,46 +255,6 @@ const void* memmem(const void* haystack, size_t haystack_length, const void* nee
return true;
}
void* memset(void* dest_ptr, int c, size_t n)
{
size_t dest = (size_t)dest_ptr;
// FIXME: Support starting at an unaligned address.
if (!(dest & 0x3) && n >= 12) {
size_t size_ts = n / sizeof(size_t);
size_t expanded_c = explode_byte((u8)c);
#if ARCH(I386)
asm volatile(
"rep stosl\n"
: "=D"(dest)
: "D"(dest), "c"(size_ts), "a"(expanded_c)
: "memory");
#else
asm volatile(
"rep stosq\n"
: "=D"(dest)
: "D"(dest), "c"(size_ts), "a"(expanded_c)
: "memory");
#endif
n -= size_ts * sizeof(size_t);
if (n == 0)
return dest_ptr;
}
asm volatile(
"rep stosb\n"
: "=D"(dest), "=c"(n)
: "0"(dest), "1"(n), "a"(c)
: "memory");
return dest_ptr;
}
size_t strlen(const char* str)
{
size_t len = 0;
while (*(str++))
++len;
return len;
}
size_t strnlen(const char* str, size_t maxlen)
{
size_t len = 0;

@@ -22,6 +22,8 @@
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/SharedInodeVMObject.h>
extern u8* start_of_bootloader_image;
extern u8* end_of_bootloader_image;
extern u8* start_of_kernel_image;
extern u8* end_of_kernel_image;
extern FlatPtr start_of_kernel_text;
@@ -34,6 +36,9 @@ extern FlatPtr end_of_unmap_after_init;
extern FlatPtr start_of_kernel_ksyms;
extern FlatPtr end_of_kernel_ksyms;
extern "C" void* boot_pd_kernel;
extern "C" void* boot_pd_kernel_pt1023;
extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
extern size_t multiboot_copy_boot_modules_count;
@@ -196,6 +201,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Bootloader, PhysicalAddress(virtual_to_low_physical(FlatPtr(start_of_bootloader_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(end_of_bootloader_image)))) });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
if (multiboot_info_ptr->flags & 0x4) {
@@ -334,8 +340,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
}
}
extern "C" PageDirectoryEntry boot_pd3[1024];
UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
{
// We assume that the physical page range is contiguous and doesn't contain huge gaps!
@@ -436,10 +440,10 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
unquickmap_page();
// Hook the page table into the kernel page directory
PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
PhysicalAddress boot_pd_kernel_paddr(virtual_to_low_physical((FlatPtr)boot_pd_kernel));
u32 page_directory_index = (virtual_page_base_for_this_pt >> 21) & 0x1ff;
auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd3_paddr));
auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd_kernel_paddr));
PageDirectoryEntry& pde = pd[page_directory_index];
VERIFY(!pde.is_present()); // Nothing should be using this PD yet
@@ -909,7 +913,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
return {};
}
fast_u32_fill((u32*)page->paddr().offset(KERNEL_BASE).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
fast_u32_fill((u32*)page->paddr().offset(kernel_base).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
++m_system_memory_info.super_physical_pages_used;
return page;
}
@@ -939,13 +943,11 @@ void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddres
Processor::flush_tlb(page_directory, vaddr, page_count);
}
extern "C" PageTableEntry boot_pd3_pt1023[1024];
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
VERIFY(s_mm_lock.own_lock());
auto& mm_data = get_data();
auto& pte = boot_pd3_pt1023[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PD - KERNEL_PT1024_BASE) / PAGE_SIZE];
auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
if (pte.physical_page_base() != pd_paddr.get()) {
pte.set_physical_page_base(pd_paddr.get());
@@ -971,7 +973,7 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
VERIFY(s_mm_lock.own_lock());
auto& mm_data = get_data();
auto& pte = boot_pd3_pt1023[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[(KERNEL_QUICKMAP_PT - KERNEL_PT1024_BASE) / PAGE_SIZE];
if (pte.physical_page_base() != pt_paddr.get()) {
pte.set_physical_page_base(pt_paddr.get());
pte.set_present(true);
@@ -1002,7 +1004,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
auto& pte = boot_pd3_pt1023[pte_idx];
auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
if (pte.physical_page_base() != physical_address.get()) {
pte.set_physical_page_base(physical_address.get());
pte.set_present(true);
@@ -1021,7 +1023,7 @@ void MemoryManager::unquickmap_page()
VERIFY(mm_data.m_quickmap_in_use.is_locked());
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
auto& pte = boot_pd3_pt1023[pte_idx];
auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
pte.clear();
flush_tlb_local(vaddr);
mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
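The hunks above all retarget the same quickmap trick: a PTE reserved in
boot_pd_kernel_pt1023 is repointed at whichever physical page needs a
short-lived kernel mapping. A condensed sketch (not the kernel's exact
code):

// Repoint the per-CPU quickmap PTE at the desired physical page,
// invalidate the stale TLB entry, and return the fixed virtual address.
u8* quickmap_page_sketch(PhysicalAddress const& paddr)
{
    VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
    u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
    auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
    pte.set_physical_page_base(paddr.get());
    pte.set_present(true);
    flush_tlb_local(vaddr); // drop the stale translation for this slot
    return vaddr.as_ptr();
}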

@@ -43,16 +43,17 @@ constexpr FlatPtr page_round_down(FlatPtr x)
inline FlatPtr low_physical_to_virtual(FlatPtr physical)
{
return physical + KERNEL_BASE;
return physical + kernel_base;
}
inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
{
return virtual_ - KERNEL_BASE;
return virtual_ - kernel_base;
}
enum class UsedMemoryRangeType {
LowMemory = 0,
Bootloader,
Kernel,
BootModule,
PhysicalPages,
@@ -60,6 +61,7 @@ enum class UsedMemoryRangeType {
static constexpr StringView UserMemoryRangeTypeNames[] {
"Low memory",
"Bootloader",
"Kernel",
"Boot module",
"Physical Pages"

@@ -6,12 +6,15 @@
#include <AK/Memory.h>
#include <AK/Singleton.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
extern u8* end_of_kernel_image;
namespace Kernel {
static AK::Singleton<HashMap<FlatPtr, PageDirectory*>> s_cr3_map;
@@ -28,16 +31,19 @@ RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
return cr3_map().get(cr3).value_or({});
}
extern "C" FlatPtr kernel_base;
#if ARCH(X86_64)
extern "C" PageDirectoryEntry boot_pml4t[1024];
extern "C" void* boot_pml4t;
#endif
extern "C" PageDirectoryEntry* boot_pdpt[4];
extern "C" PageDirectoryEntry boot_pd0[1024];
extern "C" PageDirectoryEntry boot_pd3[1024];
extern "C" void* boot_pdpt;
extern "C" void* boot_pd0;
extern "C" void* boot_pd_kernel;
UNMAP_AFTER_INIT PageDirectory::PageDirectory()
{
m_range_allocator.initialize_with_range(VirtualAddress(KERNEL_BASE + KERNEL_PD_OFFSET), KERNEL_PD_END - (KERNEL_BASE + KERNEL_PD_OFFSET));
// make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
FlatPtr start_of_range = ((FlatPtr)&end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range);
m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
}
@@ -51,13 +57,13 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
#endif
PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
PhysicalAddress boot_pd_kernel_paddr(virtual_to_low_physical((FlatPtr)boot_pd_kernel));
dmesgln("MM: boot_pdpt @ {}", boot_pdpt_paddr);
dmesgln("MM: boot_pd0 @ {}", boot_pd0_paddr);
dmesgln("MM: boot_pd3 @ {}", boot_pd3_paddr);
dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel_paddr);
m_directory_table = PhysicalPage::create(boot_pdpt_paddr, MayReturnToFreeList::No);
m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, MayReturnToFreeList::No);
m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, MayReturnToFreeList::No);
m_directory_pages[(kernel_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel_paddr, MayReturnToFreeList::No);
}
PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
@@ -83,15 +89,13 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
m_directory_table = MM.allocate_user_physical_page();
if (!m_directory_table)
return;
auto kernel_pd_index = (KERNEL_BASE >> 30) & 0xffu;
for (size_t i = 0; i < 4; i++) {
if (i == kernel_pd_index)
continue;
auto kernel_pd_index = (kernel_base >> 30) & 0x1ffu;
for (size_t i = 0; i < kernel_pd_index; i++) {
m_directory_pages[i] = MM.allocate_user_physical_page();
if (!m_directory_pages[i])
return;
}
// Share the top 1 GiB of kernel-only mappings (>=3GiB or >=KERNEL_BASE)
// Share the top 1 GiB of kernel-only mappings (>=kernel_base)
m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];
#if ARCH(X86_64)
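The index arithmetic above generalizes which PDPT slot holds the shared
kernel directory; a sketch (not in the commit) for the classic load base:

// For kernel_base == 0xc0000000, the kernel's page directory sits in PDPT
// slot 3; every process page directory reuses that single entry, so kernel
// mappings stay identical across all address spaces.
static_assert(((0xc0000000u >> 30) & 0x1ffu) == 3);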

@@ -86,7 +86,7 @@ public:
void set_mmap(bool mmap) { m_mmap = mmap; }
bool is_user() const { return !is_kernel(); }
bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= KERNEL_BASE; }
bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= kernel_base; }
PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);

@@ -39,6 +39,7 @@
#include <Kernel/Net/NetworkTask.h>
#include <Kernel/Net/NetworkingManagement.h>
#include <Kernel/Panic.h>
#include <Kernel/Prekernel/BootInfo.h>
#include <Kernel/Process.h>
#include <Kernel/ProcessExposed.h>
#include <Kernel/RTC.h>
@@ -77,7 +78,6 @@ extern "C" u8* end_of_kernel_image;
multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
size_t multiboot_copy_boot_modules_count;
extern "C" const char kernel_cmdline[4096];
READONLY_AFTER_INIT bool g_in_early_boot;
namespace Kernel {
@@ -89,7 +89,7 @@ static void setup_serial_debug();
// We declare them here to ensure their signatures don't accidentally change.
extern "C" void init_finished(u32 cpu) __attribute__((used));
extern "C" [[noreturn]] void init_ap(FlatPtr cpu, Processor* processor_info);
extern "C" [[noreturn]] void init();
extern "C" [[noreturn]] void init(BootInfo const&);
READONLY_AFTER_INIT VirtualConsole* tty0;
@@ -105,11 +105,42 @@ static Processor s_bsp_processor; // global but let's keep it "private"
// Once multi-tasking is ready, we spawn a new thread that starts in the
// init_stage2() function. Initialization continues there.
extern "C" [[noreturn]] UNMAP_AFTER_INIT void init()
extern "C" {
u8 const* start_of_bootloader_image;
u8 const* end_of_bootloader_image;
__attribute__((section(".boot_bss"))) FlatPtr kernel_base;
#if ARCH(X86_64)
extern "C" u32 gdt64ptr;
extern "C" u16 code64_sel;
FlatPtr boot_pml4t;
#endif
FlatPtr boot_pdpt;
FlatPtr boot_pd0;
FlatPtr boot_pd_kernel;
FlatPtr boot_pd_kernel_pt1023;
const char* kernel_cmdline;
}
extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
{
g_in_early_boot = true;
setup_serial_debug();
multiboot_info_ptr = boot_info.multiboot_info_ptr;
start_of_bootloader_image = boot_info.start_of_prekernel_image;
end_of_bootloader_image = boot_info.end_of_prekernel_image;
kernel_base = boot_info.kernel_base;
#if ARCH(X86_64)
gdt64ptr = boot_info.gdt64ptr;
code64_sel = boot_info.code64_sel;
boot_pml4t = boot_info.boot_pml4t;
#endif
boot_pdpt = boot_info.boot_pdpt;
boot_pd0 = boot_info.boot_pd0;
boot_pd_kernel = boot_info.boot_pd_kernel;
boot_pd_kernel_pt1023 = boot_info.boot_pd_kernel_pt1023;
kernel_cmdline = boot_info.kernel_cmdline;
// We need to copy the command line before kmalloc is initialized,
// as it may overwrite parts of multiboot!
CommandLine::early_initialize(kernel_cmdline);

@@ -1,4 +1,4 @@
ENTRY(start)
ENTRY(init)
KERNEL_VIRTUAL_BASE = 0xc0000000;

@@ -1,11 +1,6 @@
#!/bin/sh
tmp=$(mktemp)
if [ -f Kernel32 ]; then
kernel_binary=Kernel32
else
kernel_binary=Kernel64
fi
nm -n $kernel_binary | grep -vE \\.Lubsan_data | awk '{ if ($2 != "a") print; }' | uniq > "$tmp"
nm -n Kernel | grep -vE \\.Lubsan_data | awk '{ if ($2 != "a") print; }' | uniq > "$tmp"
printf "%08x\n" "$(wc -l "$tmp" | cut -f1 -d' ')" > kernel.map
c++filt < "$tmp" >> kernel.map
rm -f "$tmp"

@@ -10,14 +10,12 @@
#
if [ "$SERENITY_ARCH" = "x86_64" ]; then
gdb_arch=i386:x86-64
kernel_binary=Kernel64
else
gdb_arch=i386:intel
kernel_binary=Kernel
fi
exec $SERENITY_KERNEL_DEBUGGER \
-ex "file $(dirname "$0")/../Build/${SERENITY_ARCH:-i686}/Kernel/$kernel_binary" \
-ex "file $(dirname "$0")/../Build/${SERENITY_ARCH:-i686}/Kernel/Kernel" \
-ex "set arch $gdb_arch" \
-ex 'target remote localhost:1234' \
-ex "source $(dirname "$0")/serenity_gdb.py" \

@@ -2,21 +2,25 @@ timeout=1
menuentry 'SerenityOS (normal)' {
root=hd0,5
multiboot /boot/Kernel root=/dev/hda4
multiboot /boot/Bootloader root=/dev/hda4
module /boot/Kernel
}
menuentry 'SerenityOS (text mode)' {
root=hd0,5
multiboot /boot/Kernel boot_mode=no-fbdev root=/dev/hda4
multiboot /boot/Bootloader boot_mode=no-fbdev root=/dev/hda4
module /boot/Kernel
}
menuentry 'SerenityOS (No ACPI)' {
root=hd0,5
multiboot /boot/Kernel root=/dev/hda4 acpi=off
multiboot /boot/Bootloader root=/dev/hda4 acpi=off
module /boot/Kernel
}
menuentry 'SerenityOS (with serial debug)' {
root=hd0,5
multiboot /boot/Kernel serial_debug root=/dev/hda4
root=hd0,5
multiboot /boot/Bootloader serial_debug root=/dev/hda4
module /boot/Kernel
}

@@ -2,20 +2,24 @@ timeout=1
menuentry 'SerenityOS (normal)' {
root=hd0,2
multiboot /boot/Kernel root=/dev/hda2
multiboot /boot/Bootloader root=/dev/hda2
module /boot/Kernel
}
menuentry 'SerenityOS (text mode)' {
root=hd0,2
multiboot /boot/Kernel boot_mode=no-fbdev root=/dev/hda2
multiboot /boot/Bootloader boot_mode=no-fbdev root=/dev/hda2
module /boot/Kernel
}
menuentry 'SerenityOS (No ACPI)' {
root=hd0,2
multiboot /boot/Kernel root=/dev/hda2 acpi=off
multiboot /boot/Bootloader root=/dev/hda2 acpi=off
module /boot/Kernel
}
menuentry 'SerenityOS (with serial debug)' {
root=hd0,2
multiboot /boot/Kernel serial_debug root=/dev/hda2
root=hd0,2
multiboot /boot/Bootloader serial_debug root=/dev/hda2
module /boot/Kernel
}

@@ -2,20 +2,24 @@ timeout=1
menuentry 'SerenityOS (normal)' {
root=hd0,1
multiboot /boot/Kernel root=/dev/hda1
multiboot /boot/Bootloader root=/dev/hda1
module /boot/Kernel
}
menuentry 'SerenityOS (text mode)' {
root=hd0,1
multiboot /boot/Kernel boot_mode=no-fbdev root=/dev/hda1
multiboot /boot/Bootloader boot_mode=no-fbdev root=/dev/hda1
module /boot/Kernel
}
menuentry 'SerenityOS (No ACPI)' {
root=hd0,1
multiboot /boot/Kernel root=/dev/hda1 acpi=off
multiboot /boot/Bootloader root=/dev/hda1 acpi=off
module /boot/Kernel
}
menuentry 'SerenityOS (with serial debug)' {
root=hd0,1
multiboot /boot/Kernel serial_debug root=/dev/hda1
multiboot /boot/Bootloader serial_debug root=/dev/hda1
module /boot/Kernel
}

@@ -204,7 +204,8 @@ elif [ "$SERENITY_RUN" = "qn" ]; then
"$SERENITY_QEMU_BIN" \
$SERENITY_COMMON_QEMU_ARGS \
-device e1000 \
-kernel Kernel/Kernel \
-kernel Kernel/Prekernel/Prekernel \
-initrd Kernel/Kernel \
-append "${SERENITY_KERNEL_CMDLINE}"
elif [ "$SERENITY_RUN" = "qtap" ]; then
# Meta/run.sh qtap: qemu with tap
@@ -216,7 +217,8 @@ elif [ "$SERENITY_RUN" = "qtap" ]; then
$SERENITY_PACKET_LOGGING_ARG \
-netdev tap,ifname=tap0,id=br0 \
-device e1000,netdev=br0 \
-kernel Kernel/Kernel \
-kernel Kernel/Prekernel/Prekernel \
-initrd Kernel/Kernel \
-append "${SERENITY_KERNEL_CMDLINE}"
sudo ip tuntap del dev tap0 mode tap
elif [ "$SERENITY_RUN" = "qgrub" ]; then
@@ -235,8 +237,22 @@ elif [ "$SERENITY_RUN" = "q35" ]; then
$SERENITY_VIRT_TECH_ARG \
-netdev user,id=breh,hostfwd=tcp:127.0.0.1:8888-10.0.2.15:8888,hostfwd=tcp:127.0.0.1:8823-10.0.2.15:23 \
-device e1000,netdev=breh \
-kernel Kernel/Kernel \
-kernel Kernel/Prekernel/Prekernel \
-initrd Kernel/Kernel \
-append "${SERENITY_KERNEL_CMDLINE}"
elif [ "$SERENITY_RUN" = "bootloader_test" ]; then
# Meta/run.sh bootloader_test: boot Prekernel + Kernel under the QEMU Q35 machine, keeping the VM around on failure
echo "Starting SerenityOS with QEMU Q35 machine, Commandline: ${SERENITY_KERNEL_CMDLINE}"
"$SERENITY_QEMU_BIN" \
$SERENITY_COMMON_QEMU_Q35_ARGS \
$SERENITY_VIRT_TECH_ARG \
-netdev user,id=breh,hostfwd=tcp:127.0.0.1:8888-10.0.2.15:8888,hostfwd=tcp:127.0.0.1:8823-10.0.2.15:23 \
-device e1000,netdev=breh \
-kernel Kernel/Prekernel/Prekernel \
-initrd Kernel/Kernel \
-append "${SERENITY_KERNEL_CMDLINE}" \
-no-reboot \
-no-shutdown
elif [ "$SERENITY_RUN" = "ci" ]; then
# Meta/run.sh ci: qemu in text mode
echo "Running QEMU in CI"
@@ -252,7 +268,8 @@ elif [ "$SERENITY_RUN" = "ci" ]; then
-nographic \
-display none \
-debugcon file:debug.log \
-kernel Kernel/Kernel \
-kernel Kernel/Prekernel/Prekernel \
-initrd Kernel/Kernel \
-append "${SERENITY_KERNEL_CMDLINE}"
else
# Meta/run.sh: qemu with user networking
@@ -262,6 +279,7 @@ else
$SERENITY_PACKET_LOGGING_ARG \
-netdev user,id=breh,hostfwd=tcp:127.0.0.1:8888-10.0.2.15:8888,hostfwd=tcp:127.0.0.1:8823-10.0.2.15:23,hostfwd=tcp:127.0.0.1:8000-10.0.2.15:8000,hostfwd=tcp:127.0.0.1:2222-10.0.2.15:22 \
-device e1000,netdev=breh \
-kernel Kernel/Kernel \
-kernel Kernel/Prekernel/Prekernel \
-initrd Kernel/Kernel \
-append "${SERENITY_KERNEL_CMDLINE}"
fi