@@ -10,6 +10,7 @@
 #include <Kernel/Arch/aarch64/ASM_wrapper.h>
 #include <Kernel/Arch/aarch64/Registers.h>
+#include <Kernel/Prekernel/Arch/aarch64/MMIO.h>
 #include <Kernel/Prekernel/Arch/aarch64/UART.h>

 // Documentation here for Aarch64 Address Translations
@@ -26,8 +27,6 @@ namespace Prekernel {
 // physical memory
 constexpr u32 START_OF_NORMAL_MEMORY = 0x00000000;
 constexpr u32 END_OF_NORMAL_MEMORY = 0x3EFFFFFF;
-constexpr u32 START_OF_DEVICE_MEMORY = 0x3F000000;
-constexpr u32 END_OF_DEVICE_MEMORY = 0x3FFFFFFF;

 // 4KiB page size was chosen for the prekernel to make this code slightly simpler
 constexpr u32 GRANULE_SIZE = 0x1000;
@@ -49,24 +48,11 @@ constexpr u32 INNER_SHAREABLE = (3 << 8);
 constexpr u32 NORMAL_MEMORY = (0 << 2);
 constexpr u32 DEVICE_MEMORY = (1 << 2);

-constexpr u64* descriptor_to_pointer(u64 descriptor)
+constexpr u64* descriptor_to_pointer(FlatPtr descriptor)
 {
     return (u64*)(descriptor & DESCRIPTOR_MASK);
 }

-using page_table_t = u8*;
-
-static void zero_pages(u64* start, u64* end)
-{
-    // Memset all page table memory to zero
-    for (u64* p = (u64*)start;
-         p < (u64*)end;
-         p++) {
-
-        *p = 0;
-    }
-}
-
 namespace {
 class PageBumpAllocator {
 public:
@@ -78,7 +64,7 @@ public:
         if (m_start >= m_end) {
             Prekernel::panic("Invalid memory range passed to PageBumpAllocator");
         }
-        if ((u64)m_start % PAGE_TABLE_SIZE != 0 || (u64)m_end % PAGE_TABLE_SIZE != 0) {
+        if ((FlatPtr)m_start % PAGE_TABLE_SIZE != 0 || (FlatPtr)m_end % PAGE_TABLE_SIZE != 0) {
             Prekernel::panic("Memory range passed into PageBumpAllocator not aligned to PAGE_TABLE_SIZE");
         }
     }
@@ -90,63 +76,74 @@ public:
         }

         u64* page = m_current;
-        m_current += (PAGE_TABLE_SIZE / sizeof(u64));
+        m_current += (PAGE_TABLE_SIZE / sizeof(FlatPtr));

-        zero_pages(page, page + (PAGE_TABLE_SIZE / sizeof(u64)));
+        zero_page(page);
         return page;
     }

 private:
+    void zero_page(u64* page)
+    {
+        // Memset all page table memory to zero
+        for (u64* p = page; p < page + (PAGE_TABLE_SIZE / sizeof(u64)); p++) {
+            *p = 0;
+        }
+    }
+
     const u64* m_start;
     const u64* m_end;
     u64* m_current;
 };
 }

-static void build_identity_map(PageBumpAllocator& allocator)
+static void insert_identity_entries_for_physical_memory_range(PageBumpAllocator& allocator, u64* page_table, FlatPtr start, FlatPtr end, u64 flags)
 {
-    u64* level1_table = allocator.take_page();
-
-    level1_table[0] = (u64)allocator.take_page();
-    level1_table[0] |= TABLE_DESCRIPTOR;
-
-    u64* level2_table = descriptor_to_pointer(level1_table[0]);
-    level2_table[0] = (u64)allocator.take_page();
-    level2_table[0] |= TABLE_DESCRIPTOR;
+    // Not very efficient, but simple and it works.
+    for (FlatPtr addr = start; addr < end; addr += GRANULE_SIZE) {
+        // Each level has 9 bits (512 entries)
+        u64 level0_idx = (addr >> 39) & 0x1FF;
+        u64 level1_idx = (addr >> 30) & 0x1FF;
+        u64 level2_idx = (addr >> 21) & 0x1FF;
+        u64 level3_idx = (addr >> 12) & 0x1FF;
+
+        u64* level1_table = page_table;
+
+        if (level1_table[level0_idx] == 0) {
+            level1_table[level0_idx] = (FlatPtr)allocator.take_page();
+            level1_table[level0_idx] |= TABLE_DESCRIPTOR;
+        }

-    u64* level3_table = descriptor_to_pointer(level2_table[0]);
+        u64* level2_table = descriptor_to_pointer(level1_table[level0_idx]);

-    // // Set up L3 entries
-    for (uint32_t l3_idx = 0; l3_idx < 512; l3_idx++) {
-        level3_table[l3_idx] = (u64)allocator.take_page();
-        level3_table[l3_idx] |= TABLE_DESCRIPTOR;
-    }
+        if (level2_table[level1_idx] == 0) {
+            level2_table[level1_idx] = (FlatPtr)allocator.take_page();
+            level2_table[level1_idx] |= TABLE_DESCRIPTOR;
+        }

-    // Set up L4 entries
-    size_t page_index = 0;
+        u64* level3_table = descriptor_to_pointer(level2_table[level1_idx]);

-    for (size_t addr = START_OF_NORMAL_MEMORY; addr < END_OF_NORMAL_MEMORY; addr += GRANULE_SIZE, page_index++) {
-        u64* level4_table = descriptor_to_pointer(level3_table[page_index / 512]);
-        u64* l4_entry = &level4_table[page_index % 512];
+        if (level3_table[level2_idx] == 0) {
+            level3_table[level2_idx] = (FlatPtr)allocator.take_page();
+            level3_table[level2_idx] |= TABLE_DESCRIPTOR;
+        }

+        u64* level4_table = descriptor_to_pointer(level3_table[level2_idx]);
+        u64* l4_entry = &level4_table[level3_idx];
         *l4_entry = addr;
-        *l4_entry |= ACCESS_FLAG;
-        *l4_entry |= PAGE_DESCRIPTOR;
-        *l4_entry |= INNER_SHAREABLE;
-        *l4_entry |= NORMAL_MEMORY;
+        *l4_entry |= flags;
     }
+}

-    // Set up entries for last 16MB of memory (MMIO)
-    for (size_t addr = START_OF_DEVICE_MEMORY; addr < END_OF_DEVICE_MEMORY; addr += GRANULE_SIZE, page_index++) {
-        u64* level4_table = descriptor_to_pointer(level3_table[page_index / 512]);
-        u64* l4_entry = &level4_table[page_index % 512];
+static void build_identity_map(PageBumpAllocator& allocator)
+{
+    u64* level1_table = allocator.take_page();

-        *l4_entry = addr;
-        *l4_entry |= ACCESS_FLAG;
-        *l4_entry |= PAGE_DESCRIPTOR;
-        *l4_entry |= OUTER_SHAREABLE;
-        *l4_entry |= DEVICE_MEMORY;
-    }
+    u64 normal_memory_flags = ACCESS_FLAG | PAGE_DESCRIPTOR | INNER_SHAREABLE | NORMAL_MEMORY;
+    u64 device_memory_flags = ACCESS_FLAG | PAGE_DESCRIPTOR | OUTER_SHAREABLE | DEVICE_MEMORY;
+
+    insert_identity_entries_for_physical_memory_range(allocator, level1_table, START_OF_NORMAL_MEMORY, END_OF_NORMAL_MEMORY, normal_memory_flags);
+    insert_identity_entries_for_physical_memory_range(allocator, level1_table, MMIO::the().peripheral_base_address(), MMIO::the().peripheral_end_address(), device_memory_flags);
 }

 static void switch_to_page_table(u8* page_table)
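
Note on the indexing introduced above: with the 4 KiB granule, each translation level consumes 9 bits of the address (512 entries per table), so every page maps to four indices of at most 511. The standalone sketch below is not part of the patch; it is a plain hosted C++ program, offered only to illustrate the shift-and-mask arithmetic used by insert_identity_entries_for_physical_memory_range. It uses the old START_OF_DEVICE_MEMORY value 0x3F000000 (removed in this patch) as the example address and prints the indices 0, 0, 504, 0.

// Illustrative sketch only -- mirrors the index arithmetic in the patch above.
#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t const addr = 0x3F000000; // example: the old START_OF_DEVICE_MEMORY
    uint64_t const level0_idx = (addr >> 39) & 0x1FF; // bits 47:39
    uint64_t const level1_idx = (addr >> 30) & 0x1FF; // bits 38:30
    uint64_t const level2_idx = (addr >> 21) & 0x1FF; // bits 29:21
    uint64_t const level3_idx = (addr >> 12) & 0x1FF; // bits 20:12

    // Prints "0 0 504 0": 0x3F000000 is 504 * 2 MiB, so only the level 2 index is non-zero.
    std::printf("%llu %llu %llu %llu\n",
        (unsigned long long)level0_idx, (unsigned long long)level1_idx,
        (unsigned long long)level2_idx, (unsigned long long)level3_idx);
    return 0;
}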
|