2020-01-18 08:38:21 +00:00
|
|
|
/*
|
2021-03-09 21:09:07 +00:00
|
|
|
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
2020-01-18 08:38:21 +00:00
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 08:38:21 +00:00
|
|
|
*/
|
|
|
|
|
2020-03-08 11:33:14 +00:00
|
|
|
#include <AK/Memory.h>
|
2020-08-25 01:35:19 +00:00
|
|
|
#include <AK/Singleton.h>
|
2019-04-03 13:13:07 +00:00
|
|
|
#include <Kernel/Process.h>
|
2020-01-17 22:05:37 +00:00
|
|
|
#include <Kernel/Random.h>
|
2021-06-22 15:40:16 +00:00
|
|
|
#include <Kernel/Sections.h>
|
2019-06-07 09:43:58 +00:00
|
|
|
#include <Kernel/VM/MemoryManager.h>
|
|
|
|
#include <Kernel/VM/PageDirectory.h>
|
2019-04-03 13:13:07 +00:00
|
|
|
|
2020-02-16 00:27:42 +00:00
|
|
|
namespace Kernel {
|
|
|
|
|
2020-08-25 01:35:19 +00:00
|
|
|
static AK::Singleton<HashMap<u32, PageDirectory*>> s_cr3_map;
|
|
|
|
|
2019-12-25 01:46:17 +00:00
|
|
|
// Accessor for the global CR3 -> PageDirectory lookup table.
// The map itself carries no locking; callers must have interrupts
// disabled (enforced here) and hold s_mm_lock where mutation races
// are possible.
static HashMap<u32, PageDirectory*>& cr3_map()
{
    VERIFY_INTERRUPTS_DISABLED();
    auto& map = *s_cr3_map;
    return map;
}
|
|
|
|
|
2019-12-25 01:46:17 +00:00
|
|
|
// Looks up the PageDirectory whose CR3 (physical base of the PDPT)
// matches the given value. Returns a null RefPtr when no directory
// with that CR3 has been registered.
RefPtr<PageDirectory> PageDirectory::find_by_cr3(u32 cr3)
{
    ScopedSpinLock lock(s_mm_lock);
    auto maybe_directory = cr3_map().get(cr3);
    if (!maybe_directory.has_value())
        return {};
    return maybe_directory.value();
}
|
|
|
|
|
2020-01-17 20:16:44 +00:00
|
|
|
extern "C" PageDirectoryEntry* boot_pdpt[4];
|
|
|
|
extern "C" PageDirectoryEntry boot_pd0[1024];
|
|
|
|
extern "C" PageDirectoryEntry boot_pd3[1024];
|
2019-12-25 10:22:16 +00:00
|
|
|
|
2021-02-19 17:41:50 +00:00
|
|
|
// Constructs the kernel's own page directory by adopting the page
// tables that boot.S already set up, instead of allocating new ones.
// Runs once during early boot and is unmapped afterwards (UNMAP_AFTER_INIT).
UNMAP_AFTER_INIT PageDirectory::PageDirectory()
{
    // Kernel virtual range managed by this directory's allocator:
    // [0xc2000000, 0xf1000000). NOTE(review): presumably chosen to sit
    // above the kernel image within the top-1GiB kernel region — confirm
    // against the MemoryManager layout.
    m_range_allocator.initialize_with_range(VirtualAddress(0xc2000000), 0x2f000000);
    // Low 2 MiB reserved for identity-mapped allocations.
    m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);

    // Adopt the page tables already set up by boot.S
    PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
    PhysicalAddress boot_pd0_paddr(virtual_to_low_physical((FlatPtr)boot_pd0));
    PhysicalAddress boot_pd3_paddr(virtual_to_low_physical((FlatPtr)boot_pd3));
    dmesgln("MM: boot_pdpt @ {}", boot_pdpt_paddr);
    dmesgln("MM: boot_pd0 @ {}", boot_pd0_paddr);
    dmesgln("MM: boot_pd3 @ {}", boot_pd3_paddr);
    // Wrap the boot-provided physical pages in PhysicalPage objects so
    // refcounting works uniformly. NOTE(review): the (true, false)
    // arguments look like (supervisor, may_return_to_freelist) — the
    // pages must never be handed back to the allocator; confirm against
    // PhysicalPage::create's signature.
    m_directory_table = PhysicalPage::create(boot_pdpt_paddr, true, false);
    m_directory_pages[0] = PhysicalPage::create(boot_pd0_paddr, true, false);
    // Slot 3 covers the top 1 GiB (kernel space); slots 1 and 2 are left
    // empty for the kernel directory.
    m_directory_pages[3] = PhysicalPage::create(boot_pd3_paddr, true, false);
}
|
|
|
|
|
2021-02-08 14:45:40 +00:00
|
|
|
// Constructs a userspace page directory (PAE: one PDPT + four page
// directories). On any allocation failure it returns early, leaving
// m_valid unset so callers can detect the partially-constructed state.
//
// parent_range_allocator: when non-null (e.g. fork), the child's VM
// range allocator is cloned from the parent's; otherwise a fresh
// allocator over the userspace range is created with a randomized base.
PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
{
    constexpr FlatPtr userspace_range_base = 0x00800000;
    constexpr FlatPtr userspace_range_ceiling = 0xbe000000;

    ScopedSpinLock lock(s_mm_lock);
    if (parent_range_allocator) {
        m_range_allocator.initialize_from_parent(*parent_range_allocator);
    } else {
        // Basic ASLR: shift the userspace range base by a random,
        // page-aligned offset of up to 32 MiB.
        size_t random_offset = (get_fast_random<u8>() % 32 * MiB) & PAGE_MASK;
        u32 base = userspace_range_base + random_offset;
        m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base);
    }

    // Set up a userspace page directory: the PDPT itself plus the three
    // user-visible page directories (slots 0-2). Each allocation may
    // fail; we bail out and stay !m_valid.
    m_directory_table = MM.allocate_user_physical_page();
    if (!m_directory_table)
        return;
    m_directory_pages[0] = MM.allocate_user_physical_page();
    if (!m_directory_pages[0])
        return;
    m_directory_pages[1] = MM.allocate_user_physical_page();
    if (!m_directory_pages[1])
        return;
    m_directory_pages[2] = MM.allocate_user_physical_page();
    if (!m_directory_pages[2])
        return;
    // Share the top 1 GiB of kernel-only mappings (>=3GiB or >=0xc0000000)
    m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];

    {
        // Temporarily map the new PDPT and point its four entries at the
        // page directories, with bit 0 (present) set.
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*m_directory_table);
        table.raw[0] = (FlatPtr)m_directory_pages[0]->paddr().as_ptr() | 1;
        table.raw[1] = (FlatPtr)m_directory_pages[1]->paddr().as_ptr() | 1;
        table.raw[2] = (FlatPtr)m_directory_pages[2]->paddr().as_ptr() | 1;
        table.raw[3] = (FlatPtr)m_directory_pages[3]->paddr().as_ptr() | 1;

        // 2 ** MAXPHYADDR - 1
        // Where MAXPHYADDR = physical_address_bit_width
        u64 max_physical_address = (1ULL << Processor::current().physical_address_bit_width()) - 1;

        // Mask of all non-address bits a PDPTE may legitimately carry:
        // bit 63 = no execute
        // bit 7 = page size
        // bit 5 = accessed
        // bit 4 = cache disable
        // bit 3 = write through
        // bit 2 = user/supervisor
        // bit 1 = read/write
        // bit 0 = present
        constexpr u64 pdpte_bit_flags = 0x80000000000000BF;

        // This is to notify us of bugs where we're:
        // 1. Going over what the processor is capable of.
        // 2. Writing into the reserved bits (51:MAXPHYADDR), where doing so throws a GPF
        //    when writing out the PDPT pointer to CR3.
        // The reason we're not checking the page directory's physical address directly is because
        // we're checking for sign extension when putting it into a PDPTE. See issue #4584.
        VERIFY((table.raw[0] & ~pdpte_bit_flags) <= max_physical_address);
        VERIFY((table.raw[1] & ~pdpte_bit_flags) <= max_physical_address);
        VERIFY((table.raw[2] & ~pdpte_bit_flags) <= max_physical_address);
        VERIFY((table.raw[3] & ~pdpte_bit_flags) <= max_physical_address);

        MM.unquickmap_page();
    }

    // Clone bottom 2 MiB of mappings from kernel_page_directory.
    // Both quickmaps use the same temporary slot, so the kernel PD entry
    // is staged in a stack buffer between the two mappings.
    PageDirectoryEntry buffer;
    auto* kernel_pd = MM.quickmap_pd(MM.kernel_page_directory(), 0);
    memcpy(&buffer, kernel_pd, sizeof(PageDirectoryEntry));
    auto* new_pd = MM.quickmap_pd(*this, 0);
    memcpy(new_pd, &buffer, sizeof(PageDirectoryEntry));

    // If we got here, construction succeeded: mark the directory valid.
    // NOTE(review): the destructor gates cr3_map removal on m_space, not
    // m_valid — verify that every registered directory also gets a space,
    // otherwise a stale cr3_map entry could be left behind.
    m_valid = true;

    // Register so find_by_cr3() can resolve this directory.
    cr3_map().set(cr3(), this);
}
|
|
|
|
|
|
|
|
// Removes this directory from the global CR3 map on teardown.
// NOTE(review): removal is gated on m_space while the constructor
// registers every successfully-built directory — confirm a directory
// that never acquires a space cannot leave a stale map entry.
PageDirectory::~PageDirectory()
{
    ScopedSpinLock lock(s_mm_lock);
    if (!m_space)
        return;
    cr3_map().remove(cr3());
}
|
2020-02-16 00:27:42 +00:00
|
|
|
|
|
|
|
}
|