From bd33c6627394b2166e1419965dd3b2d2dc0c401f Mon Sep 17 00:00:00 2001
From: Jesse Buhagiar
Date: Thu, 21 Nov 2019 16:08:11 +1100
Subject: [PATCH] Kernel: Move Kernel mapping to 0xc0000000

The kernel is no longer identity mapped to the bottom 8MiB of memory;
it is now mapped at the higher address of `0xc0000000`.

The lower ~1MiB of memory (from GRUB's mmap), however, is still identity
mapped to provide an easy way for the kernel to get physical pages for
things such as DMA. These could later be mapped to the higher address
too, as I'm not too sure how to go about doing this elegantly without a
lot of address subtractions.
---
 Kernel/Arch/i386/Boot/boot.S   | 76 +++++++++++++++++++++++++++++++---
 Kernel/Arch/i386/CPU.h         |  3 ++
 Kernel/Devices/PATAChannel.cpp | 20 +++++----
 Kernel/Devices/PATAChannel.h   |  4 +-
 Kernel/Heap/kmalloc.cpp        | 16 +++----
 Kernel/Makefile                |  2 +-
 Kernel/TTY/VirtualConsole.cpp  |  4 +-
 Kernel/VM/MemoryManager.cpp    | 50 ++++++++++------------
 Kernel/VM/MemoryManager.h      |  5 ++-
 Kernel/VM/PageDirectory.cpp    |  2 +-
 Kernel/VM/PageDirectory.h      |  2 +-
 Kernel/VM/Region.cpp           |  2 +-
 Kernel/linker.ld               | 10 ++---
 13 files changed, 132 insertions(+), 64 deletions(-)

diff --git a/Kernel/Arch/i386/Boot/boot.S b/Kernel/Arch/i386/Boot/boot.S
index 535f13d8489..14907f36121 100644
--- a/Kernel/Arch/i386/Boot/boot.S
+++ b/Kernel/Arch/i386/Boot/boot.S
@@ -34,9 +34,9 @@ stack_top:
 .section .page_tables
 .align 4096
 page_tables_start:
-.skip 4096*3
+.skip 4096*5
 
-.section .text
+.section .text.boot
 
 .global start
 .type start, @function
@@ -51,13 +51,79 @@ start:
     cli
     cld
 
+    # We first save the multiboot_info_ptr so it doesn't get trampled
+    addl $0xc0000000, %ebx
+    movl %ebx, multiboot_info_ptr - 0xc0000000
+
+    # First, let's set up the first page table to map the first 4MiB of memory.
+    # This makes sure we don't crash after we set CR3 and enable paging
+    movl $0x200, %ecx
+    xor %ebx, %ebx
+    movl $((page_tables_start + (4096 * 1)) - 0xc0000000), %edx
+    call make_table
+
+    # Now we create the kernel mappings. The kernel maps 0MiB -> 8MiB into its address space at
+    # v0xc0000000.
+    movl $0x400, %ecx
+    movl $0x0, %ebx # ebx is the base pointer (kernel base is at physical address 0 in this case)
+    movl $((page_tables_start + (4096 * 2)) - 0xc0000000), %edx
+    call make_table
+
+    movl $0x400, %ecx
+    movl $0x400000, %ebx # ebx is the base pointer (this table maps the second 4MiB, starting at physical address 0x400000)
+    movl $((page_tables_start + (4096 * 3)) - 0xc0000000), %edx
+    call make_table
+
+
+    # Okay, so we now have page tables that contain the addresses of the first 8MiB of memory. Let's insert them into the
+    # boot page directory. The index a table must be inserted at is vaddr >> 22, which is the page directory index.
+    # That means the kernel's first page table goes at index 0xc0000000 >> 22 = 768.
+    # An interesting quirk is that we must also identity map the first 4MiB, as the next instruction after enabling
+    # paging is still at a physical address, which would cause a page fault. As we have no handler, this would cause a triple fault.
+    movl $((page_tables_start + (4096 * 1)) - 0xc0000000 + 0x003), page_tables_start - 0xc0000000 + 0
+    movl $((page_tables_start + (4096 * 2)) - 0xc0000000 + 0x003), page_tables_start - 0xc0000000 + 768 * 4
+    movl $((page_tables_start + (4096 * 3)) - 0xc0000000 + 0x003), page_tables_start - 0xc0000000 + 769 * 4
+
+    # Now let's load the CR3 register with our page directory
+    movl $(page_tables_start - 0xc0000000), %ecx
+    movl %ecx, %cr3
+
+    # Let's enable paging!
+    movl %cr0, %ecx
+    orl $0x80000001, %ecx
+    movl %ecx, %cr0
+
+    lea high_address_space_start, %ecx
+    jmp *%ecx
+
+
+# Make a page table. This is called with the following arguments:
+# ebx = base pointer of mapping
+# edx = page table physical address
+# ecx = number of pages to map
+#
+# Registers used in function
+# eax = loop counter
+make_table:
+    xorl %eax, %eax
+    .loop:
+        pushl %ecx
+        movl %ebx, %ecx
+        orl $0x3, %ecx # addr | READ_WRITE | PAGE_PRESENT
+        movl %ecx, 0(%edx, %eax, 4)
+        addl $0x1000, %ebx
+        inc %eax
+        popl %ecx
+        loop .loop
+    ret
+
+# At this point, the CPU now starts reading instructions from (virtual) address 0xc0100000
+high_address_space_start:
     mov $stack_top, %esp
     and $-16, %esp
 
-    mov %ebx, multiboot_info_ptr
-
-    pushl $page_tables_start
+    pushl $(page_tables_start - 0xc0000000)
     call init
     add $4, %esp
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 5fa9057bb19..e098b05ffe3 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -8,6 +8,8 @@
 #define PAGE_SIZE 4096
 #define PAGE_MASK 0xfffff000
 
+static const u32 kernel_virtual_base = 0xc0000000;
+
 class MemoryManager;
 class PageTableEntry;
 
@@ -89,6 +91,7 @@ class PageDirectoryEntry {
 public:
     PageTableEntry* page_table_base() { return reinterpret_cast(m_raw & 0xfffff000u); }
+    PageTableEntry* page_table_virtual_base() { return reinterpret_cast((m_raw + kernel_virtual_base) & 0xfffff000u); }
 
     void set_page_table_base(u32 value)
     {
         m_raw &= 0xfff;
diff --git a/Kernel/Devices/PATAChannel.cpp b/Kernel/Devices/PATAChannel.cpp
index 5e2fb84cb26..0cde8e7c9d9 100644
--- a/Kernel/Devices/PATAChannel.cpp
+++ b/Kernel/Devices/PATAChannel.cpp
@@ -116,11 +116,13 @@ void PATAChannel::initialize(bool force_pio)
             kprintf("PATAChannel: PATA Controller found! id=%w:%w\n", id.vendor_id, id.device_id);
         }
     });
+
+    m_prdt_page = MM.allocate_supervisor_physical_page();
 
     m_force_pio.resource() = false;
     if (!m_pci_address.is_null()) {
         // Let's try to set up DMA transfers.
         PCI::enable_bus_mastering(m_pci_address);
-        m_prdt.end_of_table = 0x8000;
+        prdt().end_of_table = 0x8000;
         m_bus_master_base = PCI::get_BAR4(m_pci_address) & 0xfffc;
         m_dma_buffer_page = MM.allocate_supervisor_physical_page();
         kprintf("PATAChannel: Bus master IDE: I/O @ %x\n", m_bus_master_base);
@@ -259,16 +261,16 @@ bool PATAChannel::ata_read_sectors_with_dma(u32 lba, u16 count, u8* outbuf, bool
 
     disable_irq();
 
-    m_prdt.offset = m_dma_buffer_page->paddr();
-    m_prdt.size = 512 * count;
+    prdt().offset = m_dma_buffer_page->paddr();
+    prdt().size = 512 * count;
 
-    ASSERT(m_prdt.size <= PAGE_SIZE);
+    ASSERT(prdt().size <= PAGE_SIZE);
 
     // Stop bus master
     IO::out8(m_bus_master_base, 0);
 
     // Write the PRDT location
-    IO::out32(m_bus_master_base + 4, (u32)&m_prdt);
+    IO::out32(m_bus_master_base + 4, (u32)&prdt());
 
     // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
     IO::out8(m_bus_master_base + 2, IO::in8(m_bus_master_base + 2) | 0x6);
@@ -338,18 +340,18 @@ bool PATAChannel::ata_write_sectors_with_dma(u32 lba, u16 count, const u8* inbuf
 
     disable_irq();
 
-    m_prdt.offset = m_dma_buffer_page->paddr();
-    m_prdt.size = 512 * count;
+    prdt().offset = m_dma_buffer_page->paddr();
+    prdt().size = 512 * count;
 
     memcpy(m_dma_buffer_page->paddr().as_ptr(), inbuf, 512 * count);
 
-    ASSERT(m_prdt.size <= PAGE_SIZE);
+    ASSERT(prdt().size <= PAGE_SIZE);
 
     // Stop bus master
     IO::out8(m_bus_master_base, 0);
 
     // Write the PRDT location
-    IO::out32(m_bus_master_base + 4, (u32)&m_prdt);
+    IO::out32(m_bus_master_base + 4, (u32)&prdt());
 
     // Turn on "Interrupt" and "Error" flag. The error flag should be cleared by hardware.
     IO::out8(m_bus_master_base + 2, IO::in8(m_bus_master_base + 2) | 0x6);
diff --git a/Kernel/Devices/PATAChannel.h b/Kernel/Devices/PATAChannel.h
index 0806c2fa8c1..7a1a4ca1a01 100644
--- a/Kernel/Devices/PATAChannel.h
+++ b/Kernel/Devices/PATAChannel.h
@@ -55,6 +55,8 @@ private:
     bool ata_read_sectors(u32, u16, u8*, bool);
     bool ata_write_sectors(u32, u16, const u8*, bool);
 
+    PhysicalRegionDescriptor& prdt() { return *reinterpret_cast(m_prdt_page->paddr().as_ptr()); }
+
     // Data members
     u8 m_channel_number { 0 }; // Channel number. 0 = master, 1 = slave
     u16 m_io_base { 0x1F0 };
@@ -63,7 +65,7 @@ private:
     volatile bool m_interrupted { false };
 
     PCI::Address m_pci_address;
-    PhysicalRegionDescriptor m_prdt;
+    RefPtr m_prdt_page;
     RefPtr m_dma_buffer_page;
     u16 m_bus_master_base { 0 };
     Lockable m_dma_enabled;
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index e15c7469030..0c8cd105fe2 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -6,11 +6,11 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
 
 #define SANITIZE_KMALLOC
 
@@ -20,11 +20,11 @@ struct [[gnu::packed]] allocation_t
     size_t nchunk;
 };
 
-#define BASE_PHYSICAL (4 * MB)
+#define KMALLOC_RANGE_BASE (0xc0000000 + (4 * MB))
 #define CHUNK_SIZE 8
 #define POOL_SIZE (3 * MB)
 
-#define ETERNAL_BASE_PHYSICAL (2 * MB)
+#define ETERNAL_RANGE_BASE (0xc0000000 + (2 * MB))
 #define ETERNAL_RANGE_SIZE (2 * MB)
 
 static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
@@ -42,21 +42,21 @@ static u8* s_end_of_eternal_range;
 
 bool is_kmalloc_address(const void* ptr)
 {
-    if (ptr >= (u8*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
+    if (ptr >= (u8*)ETERNAL_RANGE_BASE && ptr < s_next_eternal_ptr)
         return true;
-    return (size_t)ptr >= BASE_PHYSICAL && (size_t)ptr <= (BASE_PHYSICAL + POOL_SIZE);
+    return (size_t)ptr >= KMALLOC_RANGE_BASE && (size_t)ptr <= (KMALLOC_RANGE_BASE + POOL_SIZE);
 }
 
 void kmalloc_init()
 {
     memset(&alloc_map, 0, sizeof(alloc_map));
-    memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);
+    memset((void*)KMALLOC_RANGE_BASE, 0, POOL_SIZE);
 
     kmalloc_sum_eternal = 0;
     sum_alloc = 0;
     sum_free = POOL_SIZE;
 
-    s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
+    s_next_eternal_ptr = (u8*)ETERNAL_RANGE_BASE;
     s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
 }
 
@@ -134,7 +134,7 @@ void* kmalloc_impl(size_t size)
                 ++chunks_here;
 
             if (chunks_here == chunks_needed) {
-                auto* a = (allocation_t*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
+                auto* a = (allocation_t*)(KMALLOC_RANGE_BASE + (first_chunk * CHUNK_SIZE));
                 u8* ptr = (u8*)a;
                 ptr += sizeof(allocation_t);
                 a->nchunk = chunks_needed;
diff --git a/Kernel/Makefile b/Kernel/Makefile
index 01fd177929d..6abe252e293 100644
--- a/Kernel/Makefile
+++ b/Kernel/Makefile
@@ -107,7 +107,7 @@
 CXXFLAGS += -nostdlib -nostdinc -nostdinc++
 CXXFLAGS += -I../Toolchain/Local/i686-pc-serenity/include/c++/8.3.0/
 CXXFLAGS += -I../Toolchain/Local/i686-pc-serenity/include/c++/8.3.0/i686-pc-serenity/
 DEFINES += -DKERNEL
-LDFLAGS += -Ttext 0x100000 -Wl,-T linker.ld -nostdlib
+LDFLAGS += -Wl,-T linker.ld -nostdlib
 
 all: $(KERNEL) kernel.map
diff --git a/Kernel/TTY/VirtualConsole.cpp b/Kernel/TTY/VirtualConsole.cpp
index 78d72f14217..ab7a167694e 100644
--- a/Kernel/TTY/VirtualConsole.cpp
+++ b/Kernel/TTY/VirtualConsole.cpp
@@ -1,10 +1,10 @@
 #include "VirtualConsole.h"
 #include "IO.h"
 #include "StdLib.h"
-#include
 #include
 #include
 #include
+#include
 
 static u8* s_vga_buffer;
 static VirtualConsole* s_consoles[6];
@@ -32,7 +32,7 @@ void VirtualConsole::flush_vga_cursor()
 
 void VirtualConsole::initialize()
 {
-    s_vga_buffer = (u8*)0xb8000;
+    s_vga_buffer = (u8*)(kernel_virtual_base + 0xb8000);
     memset(s_consoles, 0, sizeof(s_consoles));
     s_active_console = -1;
 }
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index dcfb0418288..e539132a4b8 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -24,7 +24,8 @@ MemoryManager::MemoryManager(u32 physical_address_for_kernel_page_tables)
 {
     m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(physical_address_for_kernel_page_tables));
     m_page_table_zero = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE);
-    m_page_table_one = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE * 2);
+    m_page_table_768 = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE * 2);
+    m_page_table_769 = (PageTableEntry*)(physical_address_for_kernel_page_tables + PAGE_SIZE * 3);
     initialize_paging();
 
     kprintf("MM initialized.\n");
@@ -38,7 +39,6 @@ void MemoryManager::populate_page_directory(PageDirectory& page_directory)
 {
     page_directory.m_directory_page = allocate_supervisor_physical_page();
     page_directory.entries()[0].copy_from({}, kernel_page_directory().entries()[0]);
-    page_directory.entries()[1].copy_from({}, kernel_page_directory().entries()[1]);
     // Defer to the kernel page tables for 0xC0000000-0xFFFFFFFF
     for (int i = 768; i < 1024; ++i)
         page_directory.entries()[i].copy_from({}, kernel_page_directory().entries()[i]);
@@ -47,7 +47,6 @@ void MemoryManager::populate_page_directory(PageDirectory& page_directory)
 void MemoryManager::initialize_paging()
 {
     memset(m_page_table_zero, 0, PAGE_SIZE);
-    memset(m_page_table_one, 0, PAGE_SIZE);
 
 #ifdef MM_DEBUG
     dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
 #endif
@@ -60,16 +59,12 @@ void MemoryManager::initialize_paging()
     map_protected(VirtualAddress(0), PAGE_SIZE);
 
 #ifdef MM_DEBUG
-    dbgprintf("MM: Identity map bottom 8MB\n");
+    dbgprintf("MM: Identity map bottom 1MiB\n");
 #endif
 
-    // The bottom 8 MB (except for the null page) are identity mapped & supervisor only.
-    // Every process shares these mappings.
-    create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (8 * MB) - PAGE_SIZE);
-
-    // FIXME: We should move everything kernel-related above the 0xc0000000 virtual mark.
+    create_identity_mapping(kernel_page_directory(), VirtualAddress(PAGE_SIZE), (1 * MB) - PAGE_SIZE);
 
     // Basic physical memory map:
-    // 0 -> 1 MB              We're just leaving this alone for now.
+    // 0 -> 1 MB              Page table/directory / I/O memory region
     // 1 -> 3 MB              Kernel image.
     // (last page before 2MB) Used by quickmap_page().
     // 2 MB -> 4 MB           kmalloc_eternal() space.
@@ -78,8 +73,10 @@ void MemoryManager::initialize_paging()
     // 8 MB -> MAX            Userspace physical pages (available for allocation!)
 
     // Basic virtual memory map:
-    // 0 MB -> 8MB              Identity mapped.
-    // 0xc0000000-0xffffffff    Kernel-only virtual address space.
+    // 0x00000000-0x00100000    Identity mapped. Kernel physical pages handed out by allocate_supervisor_physical_page() (for I/O, page tables, etc).
+    // 0x00800000-0xbfffffff    Userspace program virtual address space.
+    // 0xc0001000-0xc0800000    Kernel-only virtual address space. This area is mapped to the first 8 MB of physical memory and includes areas for kmalloc, etc.
+    // 0xc0800000-0xffffffff    Kernel virtual address space handed out by the kernel page directory's range allocator.
 
 #ifdef MM_DEBUG
     dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
 #endif
@@ -100,10 +97,6 @@ void MemoryManager::initialize_paging()
         if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
             continue;
 
-        // FIXME: Maybe make use of stuff below the 1MB mark?
-        if (mmap->addr < (1 * MB))
-            continue;
-
         if ((mmap->addr + mmap->len) > 0xffffffff)
             continue;
@@ -131,9 +124,8 @@ void MemoryManager::initialize_paging()
         for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
             auto addr = PhysicalAddress(page_base);
 
-            if (page_base < 7 * MB) {
-                // nothing
-            } else if (page_base >= 7 * MB && page_base < 8 * MB) {
+            // Anything below 1 MB is a kernel physical region
+            if (page_base > PAGE_SIZE && page_base < 1 * MB) {
                 if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                     m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
                     region = m_super_physical_regions.last();
@@ -141,7 +133,7 @@ void MemoryManager::initialize_paging()
                 } else {
                     region->expand(region->lower(), addr);
                 }
-            } else {
+            } else if (page_base > 8 * MB) {
                 if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
                     m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
                     region = m_user_physical_regions.last();
@@ -162,7 +154,6 @@ void MemoryManager::initialize_paging()
 #ifdef MM_DEBUG
     dbgprintf("MM: Installing page directory\n");
 #endif
-
     // Turn on CR4.PGE so the CPU will respect the G bit in page tables.
     asm volatile(
         "mov %cr4, %eax\n"
         "orl $0x80, %eax\n"
         "mov %eax, %cr4\n");
@@ -175,10 +166,6 @@ void MemoryManager::initialize_paging()
         "orl $0x80000001, %%eax\n"
         "movl %%eax, %%cr0\n" ::
             : "%eax", "memory");
-
-#ifdef MM_DEBUG
-    dbgprintf("MM: Paging initialized.\n");
-#endif
 }
 
 PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
@@ -199,9 +186,16 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
         pde.set_present(true);
         pde.set_writable(true);
         pde.set_global(true);
-    } else if (page_directory_index == 1) {
+    } else if (page_directory_index == 768) {
         ASSERT(&page_directory == m_kernel_page_directory);
-        pde.set_page_table_base((u32)m_page_table_one);
+        pde.set_page_table_base((u32)m_page_table_768);
+        pde.set_user_allowed(false);
+        pde.set_present(true);
+        pde.set_writable(true);
+        pde.set_global(true);
+    } else if (page_directory_index == 769) {
+        ASSERT(&page_directory == m_kernel_page_directory);
+        pde.set_page_table_base((u32)m_page_table_769);
         pde.set_user_allowed(false);
         pde.set_present(true);
         pde.set_writable(true);
@@ -227,7 +221,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
             page_directory.m_physical_pages.set(page_directory_index, move(page_table));
         }
     }
-    return pde.page_table_base()[page_table_index];
+    return pde.page_table_virtual_base()[page_table_index];
 }
 
 void MemoryManager::map_protected(VirtualAddress vaddr, size_t length)
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index 619f95630c3..380dd768de1 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -1,6 +1,5 @@
 #pragma once
 
-#include
 #include
 #include
 #include
@@ -8,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -114,7 +114,8 @@ private:
     RefPtr m_kernel_page_directory;
 
     PageTableEntry* m_page_table_zero { nullptr };
-    PageTableEntry* m_page_table_one { nullptr };
+    PageTableEntry* m_page_table_768 { nullptr };
+    PageTableEntry* m_page_table_769 { nullptr };
 
     VirtualAddress m_quickmap_addr;
diff --git a/Kernel/VM/PageDirectory.cpp b/Kernel/VM/PageDirectory.cpp
index 3533d6cfa9e..e95b313b8ef 100644
--- a/Kernel/VM/PageDirectory.cpp
+++ b/Kernel/VM/PageDirectory.cpp
@@ -22,7 +22,7 @@ RefPtr PageDirectory::find_by_pdb(u32 pdb)
 }
 
 PageDirectory::PageDirectory(PhysicalAddress paddr)
-    : m_range_allocator(VirtualAddress(0xc0000000), 0x3f000000)
+    : m_range_allocator(VirtualAddress(kernelspace_range_base + 0x800000), 0x3f000000)
 {
     m_directory_page = PhysicalPage::create(paddr, true, false);
     InterruptDisabler disabler;
diff --git a/Kernel/VM/PageDirectory.h b/Kernel/VM/PageDirectory.h
index b6458c1055e..7f1e63639df 100644
--- a/Kernel/VM/PageDirectory.h
+++ b/Kernel/VM/PageDirectory.h
@@ -22,7 +22,7 @@ public:
     ~PageDirectory();
 
     u32 cr3() const { return m_directory_page->paddr().get(); }
-    PageDirectoryEntry* entries() { return reinterpret_cast(cr3()); }
+    PageDirectoryEntry* entries() { return reinterpret_cast(cr3() + kernel_virtual_base); }
 
     void flush(VirtualAddress);
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 7104dd15346..fe75af58d13 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -252,7 +252,7 @@ void Region::map(PageDirectory& page_directory)
         pte.set_user_allowed(is_user_accessible());
         page_directory.flush(page_vaddr);
 #ifdef MM_DEBUG
-        dbgprintf("MM: >> map_region_at_address (PD=%p) '%s' V%p => P%p (@%p)\n", &page_directory, name().characters(), page_vaddr.get(), physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
+        kprintf("MM: >> map_region_at_address (PD=%p) '%s' V%p => P%p (@%p)\n", &page_directory, name().characters(), page_vaddr.get(), physical_page ? physical_page->paddr().get() : 0, physical_page.ptr());
 #endif
     }
 }
diff --git a/Kernel/linker.ld b/Kernel/linker.ld
index 43d1115cafc..4ac9464f92f 100644
--- a/Kernel/linker.ld
+++ b/Kernel/linker.ld
@@ -2,9 +2,9 @@ ENTRY(start)
 
 SECTIONS
 {
-    . = 0x100000;
+    . = 0xc0100000;
 
-    .text BLOCK(4K) : ALIGN(4K)
+    .text ALIGN(4K) : AT(ADDR(.text) - 0xc0000000)
     {
         Arch/i386/Boot/boot.ao *(.multiboot)
         *(.text)
         *(.text.startup)
     }
 
-    .rodata BLOCK(4K) : ALIGN(4K)
+    .rodata ALIGN(4K) : AT(ADDR(.rodata) - 0xc0000000)
     {
         start_ctors = .;
         *(.ctors)
         end_ctors = .;
 
         *(.rodata)
     }
 
-    .data BLOCK(4K) : ALIGN(4K)
+    .data ALIGN(4K) : AT(ADDR(.data) - 0xc0000000)
     {
         *(.data)
     }
 
-    .bss BLOCK(4K) : ALIGN(4K)
+    .bss ALIGN(4K) : AT(ADDR(.bss) - 0xc0000000)
     {
         *(COMMON)
         *(.bss)
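
A note for reviewers, not part of the patch: every piece of this change leans on the same address arithmetic. A kernel virtual address is its physical address plus 0xc0000000, its page directory index is vaddr >> 22 (which is why the kernel's tables land in slots 768 and 769), and its page table index is (vaddr >> 12) & 0x3ff. The stand-alone C++ sketch below only illustrates that math for the kernel entry point; the program and its vaddr value are assumptions for illustration, and kernel_virtual_base simply mirrors the constant this patch adds to CPU.h.

    // Illustrative sketch only -- not kernel code from this patch.
    #include <cstdint>
    #include <cstdio>

    static const uint32_t kernel_virtual_base = 0xc0000000; // same value the patch adds to CPU.h

    int main()
    {
        uint32_t vaddr = 0xc0100000;                  // where linker.ld now places the kernel's .text
        uint32_t paddr = vaddr - kernel_virtual_base; // loaded physically at 0x00100000, per AT(ADDR(.text) - 0xc0000000)
        uint32_t pd_index = vaddr >> 22;              // 768, the directory slot boot.S fills with the first kernel page table
        uint32_t pt_index = (vaddr >> 12) & 0x3ff;    // index into the 1024-entry page table

        printf("V%#010x -> P%#010x (PDE %u, PTE %u)\n", vaddr, paddr, pd_index, pt_index);
        return 0;
    }

Compiled on its own, this prints V0xc0100000 -> P0x00100000 (PDE 768, PTE 256), which matches the 768/769 directory slots wired up in boot.S and ensure_pte() above.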