
Kernel: Move kernel above the 3GB virtual address mark

The kernel and its static data structures are no longer identity-mapped
in the bottom 8MB of the address space, but instead move above 3GB.

The first 8MB above 3GB are pseudo-identity-mapped to the bottom 8MB of
the physical address space. But things don't have to stay this way!

Thanks to Jesse who made an earlier attempt at this, it was really easy
to get device drivers working once the page tables were in place! :^)

Fixes #734.
Andreas Kling 5 years ago
parent
commit
e362b56b4f
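For readers skimming the diff below: "pseudo-identity-mapped" means kernel virtual addresses are simply physical addresses plus a fixed 0xc0000000 offset, which is why so many call sites in this commit just add or subtract that constant. A minimal standalone sketch of the translation (mirroring the low_physical_to_virtual / virtual_to_low_physical helpers this commit adds to MemoryManager.h; the KERNEL_BASE name is illustrative only, not from the kernel):

    #include <cstdint>
    #include <cstdio>

    // Fixed offset between low physical memory and the kernel's window above 3GB.
    // (Illustrative constant name; the commit hard-codes 0xc0000000 at call sites.)
    static constexpr uint32_t KERNEL_BASE = 0xc0000000;

    static uint32_t low_physical_to_virtual(uint32_t physical) { return physical + KERNEL_BASE; }
    static uint32_t virtual_to_low_physical(uint32_t vaddr) { return vaddr - KERNEL_BASE; }

    int main()
    {
        // The VGA text buffer at physical 0xb8000 becomes virtual 0xc00b8000,
        // matching the VirtualConsole change in this commit.
        std::printf("P%08x -> V%08x\n", 0xb8000u, low_physical_to_virtual(0xb8000u));
        // The kernel is linked at virtual 0xc0100000 but still loaded at physical 1MB.
        std::printf("V%08x -> P%08x\n", 0xc0100000u, virtual_to_low_physical(0xc0100000u));
        return 0;
    }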

+ 0 - 1
Kernel/Arch/i386/APIC.cpp

@@ -146,7 +146,6 @@ bool init()
     g_apic_base = apic_base.as_ptr();

     // copy ap init code to P8000
-    MM.map_for_kernel(VirtualAddress(0x8000), PhysicalAddress(0x8000));
     memcpy(reinterpret_cast<u8*>(0x8000), reinterpret_cast<const u8*>(apic_ap_start), apic_ap_start_size);
     return true;
 }

+ 120 - 6
Kernel/Arch/i386/Boot/boot.S

@@ -31,10 +31,20 @@ stack_bottom:
 .skip 32768
 stack_top:

-.section .page_tables
+.section .page_tables, "aw", @nobits
 .align 4096
-page_tables_start:
-.skip 4096*9
+.global boot_pdpt
+boot_pdpt:
+.skip 4096
+.global boot_pd0
+boot_pd0:
+.skip 4096
+.global boot_pd3
+boot_pd3:
+.skip 4096
+.global boot_pd3_pde1023_pt
+boot_pd3_pde1023_pt:
+.skip 4096

 .section .text

@@ -47,17 +57,121 @@ page_tables_start:
 .extern multiboot_info_ptr
 .type multiboot_info_ptr, @object

+/*
+    construct the following (32-bit PAE) page table layout:
+
+pdpt
+
+    0: boot_pd0 (0-1GB)
+    1: n/a      (1-2GB)
+    2: n/a      (2-3GB)
+    3: boot_pd3 (3-4GB)
+
+boot_pd0 : 512 pde's
+
+    0: (0-2MB) (id 2MB page)
+    1: (2-4MB) (id 2MB page)
+    2: (4-6MB) (id 2MB page)
+    3: (6-8MB) (id 2MB page)
+
+boot_pd3 : 512 pde's
+
+    0: boot_pd3_pde0 (3072-3074MB) (pseudo)
+    1: boot_pd3_pde1 (3074-3076MB) (pseudo)
+    2: boot_pd3_pde2 (3076-3078MB) (pseudo)
+    3: boot_pd3_pde3 (3078-3080MB) (pseudo)
+    4: boot_pd3_pde1023_pt (4094-4096MB) (for page table mappings)
+*/
+
 start:
     cli
     cld

+    /* clear pdpt */
+    movl $(boot_pdpt - 0xc0000000), %edi
+    movl $1024, %ecx
+    xorl %eax, %eax
+    rep stosl
+
+    /* set up pdpt[0] and pdpt[3] */
+    movl $(boot_pdpt - 0xc0000000), %edi
+    movl $((boot_pd0 - 0xc0000000) + 1), 0(%edi)
+    movl $((boot_pd3 - 0xc0000000) + 1), 24(%edi)
+
+    /* clear pd0 */
+    movl $(boot_pd0 - 0xc0000000), %edi
+    movl $1024, %ecx
+    xorl %eax, %eax
+    rep stosl
+
+    /* identity map bottom 8MB using 2MB pages (only PDE, no PTE) */
+    movl $4, %ecx
+    xorl %eax, %eax
+    movl $(boot_pd0 - 0xc0000000), %edi
+1:
+    movl %eax, 0(%edi)
+    /* PS(2MB) + R/W + Present */
+    orl $0x83, 0(%edi)
+
+    addl $8, %edi
+    addl $(1048576 * 2), %eax
+    loop 1b
+
+    /* clear pd3 */
+    movl $(boot_pd3 - 0xc0000000), %edi
+    movl $1024, %ecx
+    xorl %eax, %eax
+    rep stosl
+
+    /* pseudo-identity map first 8MB above 3GB mark using 2MB pages again */
+    movl $4, %ecx
+    xorl %eax, %eax
+    movl $(boot_pd3 - 0xc0000000), %edi
+1:
+    movl %eax, 0(%edi)
+    /* PS(2MB) + R/W + Present */
+    orl $0x83, 0(%edi)
+
+    addl $8, %edi
+    addl $(1048576 * 2), %eax
+    loop 1b
+
+    /* create an empty page table for the top 2MB at the 4GB mark */
+    movl $(boot_pd3 - 0xc0000000), %edi
+    movl $(boot_pd3_pde1023_pt - 0xc0000000), 4088(%edi)
+    orl $0x3, 4088(%edi)
+    movl $0, 4092(%edi)
+
+    /* point CR3 to PDPT */
+    movl $(boot_pdpt - 0xc0000000), %eax
+    movl %eax, %cr3
+
+    /* enable PAE + PSE */
+    movl %cr4, %eax
+    orl $0x60, %eax
+    movl %eax, %cr4
+
+    /* enable PG */
+    movl %cr0, %eax
+    orl $0x80000000, %eax
+    movl %eax, %cr0
+
+    /* jmp to an address above the 3GB mark */
+    push %cs
+    push $1f
+    retf
+1:
+
+    movl %cr3, %eax
+    movl %eax, %cr3
+
+    /* set up initial stack and jump into C++ land */
     mov $stack_top, %esp
-
     and $-16, %esp

-    mov %ebx, multiboot_info_ptr
+    addl $0xc0000000, %ebx
+    movl %ebx, multiboot_info_ptr

-    pushl $page_tables_start
     call init
     add $4, %esp


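An aside on the constants in the boot code above: each PAE PDPT entry is 8 bytes, so the writes to 0(%edi) and 24(%edi) fill entries 0 and 3 (the 0-1GB and 3-4GB slots), with the +1 setting the Present bit, and the 0x83 OR'ed into every PDE is Present | ReadWrite | PS, i.e. a writable 2MB page. A small sketch that reproduces those numbers (enum names are illustrative, not from the kernel):

    #include <cstdint>
    #include <cstdio>

    // x86 paging flag bits as used by boot.S (names chosen for illustration).
    enum PdeFlags : uint32_t {
        Present = 1u << 0,
        ReadWrite = 1u << 1,
        PageSize = 1u << 7, // "PS": this PDE maps a 2MB page directly (PAE + PSE)
    };

    int main()
    {
        std::printf("2MB PDE flags: 0x%02x\n", (unsigned)(Present | ReadWrite | PageSize)); // 0x83
        std::printf("PDPTE[3] byte offset: %u\n", 3u * 8u);                                  // 24
        std::printf("2MB page stride: %u bytes\n", 1048576u * 2u);                           // matches addl $(1048576 * 2)
        return 0;
    }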
+ 17 - 2
Kernel/Arch/i386/CPU.cpp

@@ -146,6 +146,20 @@ static void dump(const RegisterDump& regs)
     kprintf("eax=%08x ebx=%08x ecx=%08x edx=%08x\n", regs.eax, regs.ebx, regs.ecx, regs.edx);
     kprintf("eax=%08x ebx=%08x ecx=%08x edx=%08x\n", regs.eax, regs.ebx, regs.ecx, regs.edx);
     kprintf("ebp=%08x esp=%08x esi=%08x edi=%08x\n", regs.ebp, esp, regs.esi, regs.edi);
     kprintf("ebp=%08x esp=%08x esi=%08x edi=%08x\n", regs.ebp, esp, regs.esi, regs.edi);
 
 
+    u32 cr0;
+    asm("movl %%cr0, %%eax"
+        : "=a"(cr0));
+    u32 cr2;
+    asm("movl %%cr2, %%eax"
+        : "=a"(cr2));
+    u32 cr3;
+    asm("movl %%cr3, %%eax"
+        : "=a"(cr3));
+    u32 cr4;
+    asm("movl %%cr4, %%eax"
+        : "=a"(cr4));
+    kprintf("cr0=%08x cr2=%08x cr3=%08x cr4=%08x\n", cr0, cr2, cr3, cr4);
+
     if (current && current->process().validate_read((void*)regs.eip, 8)) {
         SmapDisabler disabler;
         u8* codeptr = (u8*)regs.eip;
@@ -221,7 +235,7 @@ EH_ENTRY(14, page_fault);
 void page_fault_handler(RegisterDump regs)
 {
     clac();
-    ASSERT(current);
+    //ASSERT(current);

     u32 fault_address;
     asm("movl %%cr2, %%eax"
@@ -232,12 +246,13 @@ void page_fault_handler(RegisterDump regs)
         : "=a"(fault_page_directory));
         : "=a"(fault_page_directory));
 
 
 #ifdef PAGE_FAULT_DEBUG
 #ifdef PAGE_FAULT_DEBUG
-    dbgprintf("%s(%u): ring%u %s page fault in PD=%x, %s V%08x\n",
+    dbgprintf("%s(%u): ring%u %s page fault in PD=%x, %s%s V%08x\n",
         current ? current->process().name().characters() : "(none)",
         current ? current->pid() : 0,
         regs.cs & 3,
         regs.exception_code & 1 ? "PV" : "NP",
         fault_page_directory,
+        regs.exception_code & 8 ? "reserved-bit " : "",
         regs.exception_code & 2 ? "write" : "read",
         fault_address);
 #endif

+ 6 - 0
Kernel/Arch/i386/CPU.h

@@ -95,6 +95,8 @@ public:
         m_raw |= value & 0xfffff000;
     }

+    void clear() { m_raw = 0; }
+
     u64 raw() const { return m_raw; }
     void copy_from(Badge<PageDirectory>, const PageDirectoryEntry& other) { m_raw = other.m_raw; }

@@ -104,6 +106,7 @@ public:
         UserSupervisor = 1 << 2,
         WriteThrough = 1 << 3,
         CacheDisabled = 1 << 4,
+        Huge = 1 << 7,
         Global = 1 << 8,
         NoExecute = 0x8000000000000000ULL,
     };
@@ -114,6 +117,9 @@ public:
     bool is_user_allowed() const { return raw() & UserSupervisor; }
     void set_user_allowed(bool b) { set_bit(UserSupervisor, b); }

+    bool is_huge() const { return raw() & Huge; }
+    void set_huge(bool b) { set_bit(Huge, b); }
+
     bool is_writable() const { return raw() & ReadWrite; }
     void set_writable(bool b) { set_bit(ReadWrite, b); }


+ 13 - 11
Kernel/Devices/PATAChannel.cpp

@@ -102,6 +102,8 @@ PATAChannel::PATAChannel(ChannelType type, bool force_pio)
     m_dma_enabled.resource() = true;
     ProcFS::add_sys_bool("ide_dma", m_dma_enabled);

+    m_prdt_page = MM.allocate_supervisor_physical_page();
+
     initialize(force_pio);
     detect_disks();
 }
@@ -131,7 +133,7 @@ void PATAChannel::initialize(bool force_pio)

     // Let's try to set up DMA transfers.
     PCI::enable_bus_mastering(m_pci_address);
-    m_prdt.end_of_table = 0x8000;
+    prdt().end_of_table = 0x8000;
     m_bus_master_base = PCI::get_BAR4(m_pci_address) & 0xfffc;
     m_dma_buffer_page = MM.allocate_supervisor_physical_page();
     kprintf("PATAChannel: Bus master IDE: I/O @ %x\n", m_bus_master_base);
@@ -253,16 +255,16 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, u8* outbuf, bool
         current->pid(), lba, count, outbuf);
 #endif

-    m_prdt.offset = m_dma_buffer_page->paddr();
-    m_prdt.size = 512 * count;
+    prdt().offset = m_dma_buffer_page->paddr();
+    prdt().size = 512 * count;

-    ASSERT(m_prdt.size <= PAGE_SIZE);
+    ASSERT(prdt().size <= PAGE_SIZE);

     // Stop bus master
     IO::out8(m_bus_master_base, 0);

     // Write the PRDT location
-    IO::out32(m_bus_master_base + 4, (u32)&m_prdt);
+    IO::out32(m_bus_master_base + 4, m_prdt_page->paddr().get());

     // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
     IO::out8(m_bus_master_base + 2, IO::in8(m_bus_master_base + 2) | 0x6);
@@ -310,7 +312,7 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, u8* outbuf, bool
     if (m_device_error)
         return false;

-    memcpy(outbuf, m_dma_buffer_page->paddr().as_ptr(), 512 * count);
+    memcpy(outbuf, m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), 512 * count);

     // I read somewhere that this may trigger a cache flush so let's do it.
     IO::out8(m_bus_master_base + 2, IO::in8(m_bus_master_base + 2) | 0x6);
@@ -326,18 +328,18 @@ bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const u8* inbuf
         current->pid(), lba, count, inbuf);
 #endif

-    m_prdt.offset = m_dma_buffer_page->paddr();
-    m_prdt.size = 512 * count;
+    prdt().offset = m_dma_buffer_page->paddr();
+    prdt().size = 512 * count;

-    memcpy(m_dma_buffer_page->paddr().as_ptr(), inbuf, 512 * count);
+    memcpy(m_dma_buffer_page->paddr().offset(0xc0000000).as_ptr(), inbuf, 512 * count);

-    ASSERT(m_prdt.size <= PAGE_SIZE);
+    ASSERT(prdt().size <= PAGE_SIZE);

     // Stop bus master
     IO::out8(m_bus_master_base, 0);

     // Write the PRDT location
-    IO::out32(m_bus_master_base + 4, (u32)&m_prdt);
+    IO::out32(m_bus_master_base + 4, m_prdt_page->paddr().get());

     // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
     IO::out8(m_bus_master_base + 2, IO::in8(m_bus_master_base + 2) | 0x6);

+ 2 - 1
Kernel/Devices/PATAChannel.h

@@ -65,7 +65,8 @@ private:
     WaitQueue m_irq_queue;

     PCI::Address m_pci_address;
-    PhysicalRegionDescriptor m_prdt;
+    PhysicalRegionDescriptor& prdt() { return *reinterpret_cast<PhysicalRegionDescriptor*>(m_prdt_page->paddr().offset(0xc0000000).as_ptr()); }
+    RefPtr<PhysicalPage> m_prdt_page;
     RefPtr<PhysicalPage> m_dma_buffer_page;
     u16 m_bus_master_base { 0 };
     Lockable<bool> m_dma_enabled;

+ 2 - 2
Kernel/Heap/kmalloc.cpp

@@ -20,11 +20,11 @@ struct [[gnu::packed]] allocation_t
     size_t nchunk;
 };

-#define BASE_PHYSICAL (4 * MB)
+#define BASE_PHYSICAL (0xc0000000 + (4 * MB))
 #define CHUNK_SIZE 8
 #define POOL_SIZE (3 * MB)

-#define ETERNAL_BASE_PHYSICAL (2 * MB)
+#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MB))
 #define ETERNAL_RANGE_SIZE (2 * MB)

 static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

+ 1 - 1
Kernel/Makefile

@@ -123,7 +123,7 @@ SUBPROJECT_CXXFLAGS += -nostdlib -nostdinc -nostdinc++ -g3
 SUBPROJECT_CXXFLAGS += -I../Toolchain/Local/i686-pc-serenity/include/c++/9.2.0/
 SUBPROJECT_CXXFLAGS += -I../Toolchain/Local/i686-pc-serenity/include/c++/9.2.0/i686-pc-serenity/

-LDFLAGS += -Ttext 0x100000 -Wl,-T linker.ld -nostdlib -lgcc -lstdc++ -g3
+LDFLAGS += -Wl,-T linker.ld -nostdlib -lgcc -lstdc++ -g3

 all: $(PROGRAM) $(MODULE_OBJS) kernel.map


+ 13 - 6
Kernel/Net/E1000NetworkAdapter.cpp

@@ -234,11 +234,14 @@ void E1000NetworkAdapter::initialize_rx_descriptors()
     m_rx_descriptors = (e1000_rx_desc*)ptr;
     for (int i = 0; i < number_of_rx_descriptors; ++i) {
         auto& descriptor = m_rx_descriptors[i];
-        descriptor.addr = (u64)kmalloc_eternal(8192 + 16);
+        auto addr = (u32)kmalloc_eternal(8192 + 16);
+        if (addr % 16)
+            addr = (addr + 16) - (addr % 16);
+        descriptor.addr = addr - 0xc0000000;
         descriptor.status = 0;
     }

-    out32(REG_RXDESCLO, ptr);
+    out32(REG_RXDESCLO, (u32)ptr - 0xc0000000);
     out32(REG_RXDESCHI, 0);
     out32(REG_RXDESCLEN, number_of_rx_descriptors * sizeof(e1000_rx_desc));
     out32(REG_RXDESCHEAD, 0);
@@ -256,11 +259,14 @@ void E1000NetworkAdapter::initialize_tx_descriptors()
     m_tx_descriptors = (e1000_tx_desc*)ptr;
     for (int i = 0; i < number_of_tx_descriptors; ++i) {
         auto& descriptor = m_tx_descriptors[i];
-        descriptor.addr = (u64)kmalloc_eternal(8192 + 16);
+        auto addr = (u32)kmalloc_eternal(8192 + 16);
+        if (addr % 16)
+            addr = (addr + 16) - (addr % 16);
+        descriptor.addr = addr - 0xc0000000;
         descriptor.cmd = 0;
     }

-    out32(REG_TXDESCLO, ptr);
+    out32(REG_TXDESCLO, (u32)ptr - 0xc0000000);
     out32(REG_TXDESCHI, 0);
     out32(REG_TXDESCLEN, number_of_tx_descriptors * sizeof(e1000_tx_desc));
     out32(REG_TXDESCHEAD, 0);
@@ -348,7 +354,8 @@ void E1000NetworkAdapter::send_raw(const u8* data, int length)
 #endif
     auto& descriptor = m_tx_descriptors[tx_current];
     ASSERT(length <= 8192);
-    memcpy((void*)descriptor.addr, data, length);
+    auto *vptr = (void*)(descriptor.addr + 0xc0000000);
+    memcpy(vptr, data, length);
     descriptor.length = length;
     descriptor.status = 0;
     descriptor.cmd = CMD_EOP | CMD_IFCS | CMD_RS;
@@ -381,7 +388,7 @@ void E1000NetworkAdapter::receive()
         rx_current = (rx_current + 1) % number_of_rx_descriptors;
         if (!(m_rx_descriptors[rx_current].status & 1))
             break;
-        auto* buffer = (u8*)m_rx_descriptors[rx_current].addr;
+        auto* buffer = (u8*)(m_rx_descriptors[rx_current].addr + 0xc0000000);
         u16 length = m_rx_descriptors[rx_current].length;
 #ifdef E1000_DEBUG
         kprintf("E1000: Received 1 packet @ %p (%u) bytes!\n", buffer, length);

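The descriptor setup above rounds each receive/transmit buffer address up to a 16-byte boundary before handing the E1000 its physical address. A compact way to express the same round-up (equivalent to the `if (addr % 16)` adjustment in the diff; standalone sketch, not kernel code):

    #include <cstdint>
    #include <cstdio>

    // Round `value` up to the next multiple of `align` (align must be a power of two).
    static uint32_t align_up(uint32_t value, uint32_t align)
    {
        return (value + align - 1) & ~(align - 1);
    }

    int main()
    {
        std::printf("0x%x\n", align_up(0x1234u, 16)); // 0x1240
        std::printf("0x%x\n", align_up(0x1240u, 16)); // 0x1240 (already aligned, unchanged)
        return 0;
    }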
+ 1 - 1
Kernel/TTY/VirtualConsole.cpp

@@ -32,7 +32,7 @@ void VirtualConsole::flush_vga_cursor()

 void VirtualConsole::initialize()
 {
-    s_vga_buffer = (u8*)0xb8000;
+    s_vga_buffer = (u8*)0xc00b8000;
     memset(s_consoles, 0, sizeof(s_consoles));
     s_active_console = -1;
 }

+ 79 - 56
Kernel/VM/MemoryManager.cpp

@@ -21,13 +21,9 @@ MemoryManager& MM
     return *s_the;
 }

-MemoryManager::MemoryManager(u32 physical_address_for_kernel_page_tables)
+MemoryManager::MemoryManager()
 {
-    m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(physical_address_for_kernel_page_tables));
-    for (size_t i = 0; i < 4; ++i) {
-        m_low_page_tables[i] = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE * (5 + i));
-        memset(m_low_page_tables[i], 0, PAGE_SIZE);
-    }
+    m_kernel_page_directory = PageDirectory::create_kernel_page_directory();

     initialize_paging();

@@ -49,32 +45,11 @@ void MemoryManager::initialize_paging()
     dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
     dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
 #endif
 #endif
 
 
-#ifdef MM_DEBUG
-    dbgprintf("MM: Protect against null dereferences\n");
-#endif
-    // Make null dereferences crash.
-    map_protected(VirtualAddress(0), PAGE_SIZE);
-
-#ifdef MM_DEBUG
-    dbgprintf("MM: Identity map bottom 8MB\n");
-#endif
-    // The bottom 8 MB (except for the null page) are identity mapped & supervisor only.
-    // Every process shares these mappings.
-    create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (8 * MB) - PAGE_SIZE);
-
-    // Disable execution from 0MB through 1MB (BIOS data, legacy things, ...)
-    if (g_cpu_supports_nx) {
-        for (size_t i = 0; i < (1 * MB); i += PAGE_SIZE) {
-            auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
-            pte.set_execute_disabled(true);
-        }
-        // Disable execution from 2MB through 8MB (kmalloc, kmalloc_eternal, slabs, page tables, ...)
-        for (size_t i = 1; i < 4; ++i) {
-            auto& pte = kernel_page_directory().table().directory(0)[i];
-            pte.set_execute_disabled(true);
-        }
-    }
+    // Disable execution from 0MB through 2MB (BIOS data, legacy things, ...)
+    if (g_cpu_supports_nx)
+        quickmap_pd(kernel_page_directory(), 0)[0].set_execute_disabled(true);

+#if 0
     // Disable writing to the kernel text and rodata segments.
     extern u32 start_of_kernel_text;
     extern u32 start_of_kernel_data;
@@ -91,17 +66,7 @@ void MemoryManager::initialize_paging()
             pte.set_execute_disabled(true);
         }
     }
-
-    // FIXME: We should move everything kernel-related above the 0xc0000000 virtual mark.
-
-    // Basic physical memory map:
-    // 0      -> 1 MB           We're just leaving this alone for now.
-    // 1      -> 2 MB           Kernel image.
-    // (last page before 2MB)   Used by quickmap_page().
-    // 2 MB   -> 4 MB           kmalloc_eternal() space.
-    // 4 MB   -> 7 MB           kmalloc() space.
-    // 7 MB   -> 8 MB           Supervisor physical pages (available for allocation!)
-    // 8 MB   -> MAX            Userspace physical pages (available for allocation!)
+#endif
 
 
     // Basic virtual memory map:
     // Basic virtual memory map:
     // 0 -> 4 KB                Null page (so nullptr dereferences crash!)
     // 0 -> 4 KB                Null page (so nullptr dereferences crash!)
@@ -109,15 +74,16 @@ void MemoryManager::initialize_paging()
     // 8 MB -> 3 GB             Available to userspace.
     // 3GB  -> 4 GB             Kernel-only virtual address space (>0xc0000000)

+    m_quickmap_addr = VirtualAddress(0xffe00000);
 #ifdef MM_DEBUG
     dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
 #endif
-    m_quickmap_addr = VirtualAddress((2 * MB) - PAGE_SIZE);

     RefPtr<PhysicalRegion> region;
     bool region_is_super = false;

-    for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
+    auto* mmap = (multiboot_memory_map_t*)(0xc0000000 + multiboot_info_ptr->mmap_addr);
+    for (; (unsigned long)mmap < (0xc0000000 + multiboot_info_ptr->mmap_addr) + (multiboot_info_ptr->mmap_length); mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
         kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
         kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
             (u32)(mmap->addr >> 32),
             (u32)(mmap->addr >> 32),
             (u32)(mmap->addr & 0xffffffff),
             (u32)(mmap->addr & 0xffffffff),
@@ -221,6 +187,7 @@ void MemoryManager::initialize_paging()

     if (g_cpu_supports_smap) {
         // Turn on CR4.SMAP
+        kprintf("x86: Enabling SMAP\n");
         asm volatile(
             "mov %cr4, %eax\n"
             "orl $0x200000, %eax\n"
@@ -261,18 +228,14 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

-    PageDirectoryEntry& pde = page_directory.table().directory(page_directory_table_index)[page_directory_index];
+    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
+    PageDirectoryEntry& pde = pd[page_directory_index];
     if (!pde.is_present()) {
 #ifdef MM_DEBUG
         dbgprintf("MM: PDE %u not present (requested for V%p), allocating\n", page_directory_index, vaddr.get());
 #endif
-        if (page_directory_table_index == 0 && page_directory_index < 4) {
-            ASSERT(&page_directory == m_kernel_page_directory);
-            pde.set_page_table_base((u32)m_low_page_tables[page_directory_index]);
-            pde.set_user_allowed(false);
-            pde.set_present(true);
-            pde.set_writable(true);
-            pde.set_global(true);
+        if (page_directory_table_index == 3 && page_directory_index < 4) {
+            ASSERT_NOT_REACHED();
         } else {
             auto page_table = allocate_supervisor_physical_page();
 #ifdef MM_DEBUG
@@ -292,7 +255,13 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
             page_directory.m_physical_pages.set(page_directory_index, move(page_table));
         }
     }
-    return pde.page_table_base()[page_table_index];
+
+    //if (&page_directory != &kernel_page_directory() && page_directory_table_index != 3) {
+    return quickmap_pt(PhysicalAddress((u32)pde.page_table_base()))[page_table_index];
+    //}
+
+    auto* phys_ptr = &pde.page_table_base()[page_table_index];
+    return *(PageTableEntry*)((u8*)phys_ptr + 0xc0000000);
 }

 void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
@@ -325,9 +294,9 @@ void MemoryManager::create_identity_mapping(PageDirectory& page_directory, Virtu
     }
 }

-void MemoryManager::initialize(u32 physical_address_for_kernel_page_tables)
+void MemoryManager::initialize()
 {
-    s_the = new MemoryManager(physical_address_for_kernel_page_tables);
+    s_the = new MemoryManager;
 }

 Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
@@ -349,6 +318,29 @@ Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress v
             return &region;
     }
     dbg() << process << " Couldn't find user region for " << vaddr;
+    if (auto* kreg = kernel_region_from_vaddr(vaddr)) {
+        dbg() << process << "  OTOH, there is a kernel region: " << kreg->range() << ": " << kreg->name();
+    } else {
+        dbg() << process << "  AND no kernel region either";
+    }
+
+    process.dump_regions();
+
+    kprintf("Kernel regions:\n");
+    kprintf("BEGIN       END         SIZE        ACCESS  NAME\n");
+    for (auto& region : MM.m_kernel_regions) {
+        kprintf("%08x -- %08x    %08x    %c%c%c%c%c%c    %s\n",
+            region.vaddr().get(),
+            region.vaddr().offset(region.size() - 1).get(),
+            region.size(),
+            region.is_readable() ? 'R' : ' ',
+            region.is_writable() ? 'W' : ' ',
+            region.is_executable() ? 'X' : ' ',
+            region.is_shared() ? 'S' : ' ',
+            region.is_stack() ? 'T' : ' ',
+            region.vmobject().is_purgeable() ? 'P' : ' ',
+            region.name().characters());
+    }
     return nullptr;
 }

@@ -567,7 +559,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
     dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
     dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
 #endif
 #endif
 
 
-    fast_u32_fill((u32*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(u32));
+    fast_u32_fill((u32*)page->paddr().offset(0xc0000000).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
     ++m_super_physical_pages_used;
     return page;
 }
@@ -601,6 +593,37 @@ void MemoryManager::flush_tlb(VirtualAddress vaddr)
                  : "memory");
                  : "memory");
 }
 }
 
 
+extern "C" PageTableEntry boot_pd3_pde1023_pt[1024];
+
+PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
+{
+    auto& pte = boot_pd3_pde1023_pt[4];
+    auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
+    if (pte.physical_page_base() != pd_paddr.as_ptr()) {
+        //dbgprintf("quickmap_pd: Mapping P%p at 0xffe04000 in pte @ %p\n", directory.m_directory_pages[pdpt_index]->paddr().as_ptr(), &pte);
+        pte.set_physical_page_base(pd_paddr.get());
+        pte.set_present(true);
+        pte.set_writable(true);
+        pte.set_user_allowed(false);
+        flush_tlb(VirtualAddress(0xffe04000));
+    }
+    return (PageDirectoryEntry*)0xffe04000;
+}
+
+PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
+{
+    auto& pte = boot_pd3_pde1023_pt[8];
+    if (pte.physical_page_base() != pt_paddr.as_ptr()) {
+        //dbgprintf("quickmap_pt: Mapping P%p at 0xffe08000 in pte @ %p\n", pt_paddr.as_ptr(), &pte);
+        pte.set_physical_page_base(pt_paddr.get());
+        pte.set_present(true);
+        pte.set_writable(true);
+        pte.set_user_allowed(false);
+        flush_tlb(VirtualAddress(0xffe08000));
+    }
+    return (PageTableEntry*)0xffe08000;
+}
+
 void MemoryManager::map_for_kernel(VirtualAddress vaddr, PhysicalAddress paddr, bool cache_disabled)
 {
     auto& pte = ensure_pte(kernel_page_directory(), vaddr);

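The two quickmap helpers added above lean on the fact that boot_pd3_pde1023_pt is the page table covering the top 2MB of the address space (0xffe00000-0xffffffff), so PTE slot 4 corresponds to virtual address 0xffe04000 and slot 8 to 0xffe08000 — the addresses quickmap_pd() and quickmap_pt() return. A quick arithmetic check of those slot addresses (plain standalone C++, not kernel code):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t pt_base = 0xffe00000;   // first address covered by boot_pd3_pde1023_pt
        const uint32_t page_size = 4096;
        std::printf("slot 4 -> 0x%08x\n", pt_base + 4 * page_size); // 0xffe04000, used by quickmap_pd
        std::printf("slot 8 -> 0x%08x\n", pt_base + 8 * page_size); // 0xffe08000, used by quickmap_pt
        return 0;
    }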
+ 27 - 2
Kernel/VM/MemoryManager.h

@@ -20,6 +20,28 @@

 #define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))

+template<typename T>
+inline T* low_physical_to_virtual(T* physical)
+{
+    return (T*)(((u8*)physical) + 0xc0000000);
+}
+
+inline u32 low_physical_to_virtual(u32 physical)
+{
+    return physical + 0xc0000000;
+}
+
+template<typename T>
+inline T* virtual_to_low_physical(T* physical)
+{
+    return (T*)(((u8*)physical) - 0xc0000000);
+}
+
+inline u32 virtual_to_low_physical(u32 physical)
+{
+    return physical - 0xc0000000;
+}
+
 class KBuffer;
 class SynthFSInode;

@@ -38,7 +60,7 @@ class MemoryManager {
 public:
     static MemoryManager& the();

-    static void initialize(u32 physical_address_for_kernel_page_tables);
+    static void initialize();

     PageFaultResponse handle_page_fault(const PageFault&);

@@ -85,7 +107,7 @@ public:
     static const Region* region_from_vaddr(const Process&, VirtualAddress);

 private:
-    MemoryManager(u32 physical_address_for_kernel_page_tables);
+    MemoryManager();
     ~MemoryManager();

     enum class AccessSpace { Kernel, User };
@@ -116,6 +138,9 @@ private:
     u8* quickmap_page(PhysicalPage&);
     void unquickmap_page();

+    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
+    PageTableEntry* quickmap_pt(PhysicalAddress);
+
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

     PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);

+ 23 - 22
Kernel/VM/PageDirectory.cpp

@@ -4,7 +4,7 @@
 #include <Kernel/VM/PageDirectory.h>

 static const u32 userspace_range_base = 0x01000000;
-static const u32 kernelspace_range_base = 0xc0000000;
+static const u32 kernelspace_range_base = 0xc0800000;

 static HashMap<u32, PageDirectory*>& cr3_map()
 {
@@ -21,22 +21,23 @@ RefPtr<PageDirectory> PageDirectory::find_by_cr3(u32 cr3)
     return cr3_map().get(cr3).value_or({});
 }

-PageDirectory::PageDirectory(PhysicalAddress paddr)
-    : m_range_allocator(VirtualAddress(0xc0000000), 0x3f000000)
-{
-    m_directory_table = PhysicalPage::create(paddr, true, false);
-    m_directory_pages[0] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 1), true, false);
-    m_directory_pages[1] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 2), true, false);
-    m_directory_pages[2] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 3), true, false);
-    m_directory_pages[3] = PhysicalPage::create(paddr.offset(PAGE_SIZE * 4), true, false);
-
-    table().raw[0] = (u64)m_directory_pages[0]->paddr().as_ptr() | 1;
-    table().raw[1] = (u64)m_directory_pages[1]->paddr().as_ptr() | 1;
-    table().raw[2] = (u64)m_directory_pages[2]->paddr().as_ptr() | 1;
-    table().raw[3] = (u64)m_directory_pages[3]->paddr().as_ptr() | 1;
+extern "C" u32 boot_pdpt;
+extern "C" u32 boot_pd0;
+extern "C" u32 boot_pd3;
 
 
-    InterruptDisabler disabler;
-    cr3_map().set(cr3(), this);
+PageDirectory::PageDirectory()
+    : m_range_allocator(VirtualAddress(0xc0c00000), 0x3f000000)
+{
+    // Adopt the page tables already set up by boot.S
+    PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((u32)&boot_pdpt));
+    PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((u32)&boot_pd0));
+    PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((u32)&boot_pd3));
+    kprintf("MM: boot_pdpt @ P%p\n", boot_pdpt_paddr.get());
+    kprintf("MM: boot_pd0 @ P%p\n", boot_pd0_paddr.get());
+    kprintf("MM: boot_pd3 @ P%p\n", boot_pd3_paddr.get());
+    m_directory_table = PhysicalPage::create(boot_pdpt_paddr, true, false);
+    m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, true, false);
+    m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, true, false);
 }

 PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_range_allocator)
@@ -44,7 +45,6 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
     , m_range_allocator(parent_range_allocator ? RangeAllocator(*parent_range_allocator) : RangeAllocator(VirtualAddress(userspace_range_base), kernelspace_range_base - userspace_range_base))
 {
     // Set up a userspace page directory
-
     m_directory_table = MM.allocate_supervisor_physical_page();
     m_directory_pages[0] = MM.allocate_supervisor_physical_page();
     m_directory_pages[1] = MM.allocate_supervisor_physical_page();
@@ -58,10 +58,11 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
     table().raw[3] = (u64)m_directory_pages[3]->paddr().as_ptr() | 1;

     // Clone bottom 8 MB of mappings from kernel_page_directory
-    table().directory(0)[0].copy_from({}, MM.kernel_page_directory().table().directory(0)[0]);
-    table().directory(0)[1].copy_from({}, MM.kernel_page_directory().table().directory(0)[1]);
-    table().directory(0)[2].copy_from({}, MM.kernel_page_directory().table().directory(0)[2]);
-    table().directory(0)[3].copy_from({}, MM.kernel_page_directory().table().directory(0)[3]);
+    PageDirectoryEntry buffer[4];
+    auto* kernel_pd = MM.quickmap_pd(MM.kernel_page_directory(), 0);
+    memcpy(buffer, kernel_pd, sizeof(PageDirectoryEntry) * 4);
+    auto* new_pd = MM.quickmap_pd(*this, 0);
+    memcpy(new_pd, buffer, sizeof(PageDirectoryEntry) * 4);

     InterruptDisabler disabler;
     cr3_map().set(cr3(), this);
@@ -74,4 +75,4 @@ PageDirectory::~PageDirectory()
 #endif
     InterruptDisabler disabler;
     cr3_map().remove(cr3());
-}
+}
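A note on the new constants in PageDirectory.cpp above: the first 8MB above the 3GB mark (0xc0000000-0xc07fffff) are the pseudo-identity window set up by boot.S, so the ceiling for per-process range allocation moves up to 0xc0800000, and the kernel page directory's own RangeAllocator starts another 4MB higher at 0xc0c00000. A small sanity check of that arithmetic (standalone and illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t three_gb = 0xc0000000;
        const uint32_t eight_mb = 8 * 1024 * 1024;
        // End of the pseudo-identity window == new kernelspace_range_base.
        std::printf("0x%08x\n", three_gb + eight_mb);                   // 0xc0800000
        // The kernel RangeAllocator's base sits 4MB above that.
        std::printf("0x%08x\n", three_gb + eight_mb + 4 * 1024 * 1024); // 0xc0c00000
        return 0;
    }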

+ 3 - 3
Kernel/VM/PageDirectory.h

@@ -16,13 +16,13 @@ public:
     {
         return adopt(*new PageDirectory(process, parent_range_allocator));
     }
-    static NonnullRefPtr<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
+    static NonnullRefPtr<PageDirectory> create_kernel_page_directory() { return adopt(*new PageDirectory); }
     static RefPtr<PageDirectory> find_by_cr3(u32);

     ~PageDirectory();

     u32 cr3() const { return m_directory_table->paddr().get(); }
-    PageDirectoryPointerTable& table() { return *reinterpret_cast<PageDirectoryPointerTable*>(cr3()); }
+    PageDirectoryPointerTable& table() { return *reinterpret_cast<PageDirectoryPointerTable*>(0xc0000000 + cr3()); }

     RangeAllocator& range_allocator() { return m_range_allocator; }

@@ -31,7 +31,7 @@ public:

 private:
     PageDirectory(Process&, const RangeAllocator* parent_range_allocator);
-    explicit PageDirectory(PhysicalAddress);
+    PageDirectory();

     Process* m_process { nullptr };
     RangeAllocator m_range_allocator;

+ 1 - 1
Kernel/VM/Region.cpp

@@ -242,7 +242,7 @@ void Region::map_individual_page_impl(size_t page_index)
             pte.set_execute_disabled(!is_executable());
         pte.set_user_allowed(is_user_accessible());
 #ifdef MM_DEBUG
-    dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
+        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
 #endif
     }
     MM.flush_tlb(page_vaddr);

+ 7 - 4
Kernel/init.cpp

@@ -229,7 +229,7 @@ extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
 extern u32 __stack_chk_guard;
 u32 __stack_chk_guard;

-extern "C" [[noreturn]] void init(u32 physical_address_for_kernel_page_tables)
+extern "C" [[noreturn]] void init()
 {
     // this is only used one time, directly below here. we can't use this part
     // of libc at this point in the boot process, or we'd just pull strstr in
@@ -247,7 +247,8 @@ extern "C" [[noreturn]] void init(u32 physical_address_for_kernel_page_tables)
     // process on live hardware.
     //
     // note: it must be the first option in the boot cmdline.
-    if (multiboot_info_ptr->cmdline && bad_prefix_check(reinterpret_cast<const char*>(multiboot_info_ptr->cmdline), "serial_debug"))
+    u32 cmdline = low_physical_to_virtual(multiboot_info_ptr->cmdline);
+    if (cmdline && bad_prefix_check(reinterpret_cast<const char*>(cmdline), "serial_debug"))
         set_serial_debug(true);

     detect_cpu_features();
@@ -256,14 +257,16 @@ extern "C" [[noreturn]] void init(u32 physical_address_for_kernel_page_tables)
     slab_alloc_init();

     // must come after kmalloc_init because we use AK_MAKE_ETERNAL in KParams
-    new KParams(String(reinterpret_cast<const char*>(multiboot_info_ptr->cmdline)));
+    new KParams(String(reinterpret_cast<const char*>(cmdline)));

     bool text_debug = KParams::the().has("text_debug");
     bool complete_acpi_disable = KParams::the().has("noacpi");
     bool dynamic_acpi_disable = KParams::the().has("noacpi_aml");
     bool pci_mmio_disable = KParams::the().has("nopci_mmio");

-    MemoryManager::initialize(physical_address_for_kernel_page_tables);
+    complete_acpi_disable = true;
+
+    MemoryManager::initialize();

     if (complete_acpi_disable) {
         ACPIParser::initialize_limited();

+ 10 - 6
Kernel/linker.ld

@@ -2,20 +2,21 @@ ENTRY(start)

 SECTIONS
 {
-    . = 0x100000;
+    . = 0xc0100000;
 
 
-    .text BLOCK(4K) : ALIGN(4K)
+    start_of_kernel_image = .;
+
+    .text ALIGN(4K) : AT (ADDR(.text) - 0xc0000000)
     {
         Arch/i386/Boot/boot.ao
         *(.multiboot)
-        *(.page_tables)
         start_of_kernel_text = .;
         *(.text)
         *(.text.startup)
         end_of_kernel_text = .;
     }

-    .rodata BLOCK(4K) : ALIGN(4K)
+    .rodata ALIGN(4K) : AT (ADDR(.rodata) - 0xc0000000)
     {
         start_ctors = .;
         *(.ctors)
@@ -24,18 +25,21 @@ SECTIONS
         *(.rodata)
     }

-    .data BLOCK(4K) : ALIGN(4K)
+    .data ALIGN(4K) : AT (ADDR(.data) - 0xc0000000)
     {
         start_of_kernel_data = .;
         *(.data)
         end_of_kernel_data = .;
     }

-    .bss BLOCK(4K) : ALIGN(4K)
+    .bss ALIGN(4K) : AT (ADDR(.bss) - 0xc0000000)
     {
         start_of_kernel_bss = .;
+        *(page_tables)
         *(COMMON)
         *(.bss)
         end_of_kernel_bss = .;
     }
+
+    end_of_kernel_image = .;
 }